##// END OF EJS Templates
# Review-page metadata for commit "py3: conditionalize httplib import"
# by Pulkit Goyal, revision r29455:0c741fd6 on the default branch.
# ("parent child Browse files" and "Show More" are review-UI navigation chrome.)
@@ -1,649 +1,650
#!/usr/bin/env python
#
# check-code - a style and portability checker for Mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""style and portability checker for Mercurial

when a rule triggers wrong, do one of the following (prefer one from top):
 * do the work-around the rule suggests
 * doublecheck that it is a false match
 * improve the rule pattern
 * add an ignore pattern to the rule (3rd arg) which matches your good line
   (you can append a short comment and match this, like: #re-raises)
 * change the pattern to a warning and list the exception in test-check-code-hg
 * ONLY use no--check-code for skipping entire files from external sources
"""

from __future__ import absolute_import, print_function
import glob
import keyword
import optparse
import os
import re
import sys

# On Python 2 the built-in open() reads text directly; on Python 3 force an
# ASCII decode so non-ASCII bytes in checked files raise loudly.
if sys.version_info[0] < 3:
    opentext = open
else:
    def opentext(f):
        return open(f, encoding='ascii')

# Python 2/3 compatibility shim: fall back to range() where xrange is gone.
try:
    xrange
except NameError:
    xrange = range

# Prefer the faster re2 engine when it is installed; None means "unavailable".
try:
    import re2
except ImportError:
    re2 = None
42
42
def compilere(pat, multiline=False):
    """Compile a pattern, preferring the re2 engine when it is usable.

    When multiline is true the pattern is compiled with the (?m) flag.
    Falls back to the stdlib re module if re2 is missing or rejects the
    pattern's syntax.
    """
    source = ('(?m)' + pat) if multiline else pat
    if re2:
        try:
            return re2.compile(source)
        except re2.error:
            # re2 does not support the full re syntax; fall through.
            pass
    return re.compile(source)
52
52
53 # check "rules depending on implementation of repquote()" in each
53 # check "rules depending on implementation of repquote()" in each
54 # patterns (especially pypats), before changing around repquote()
54 # patterns (especially pypats), before changing around repquote()
55 _repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
55 _repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
56 '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
56 '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
57 def _repquoteencodechr(i):
57 def _repquoteencodechr(i):
58 if i > 255:
58 if i > 255:
59 return 'u'
59 return 'u'
60 c = chr(i)
60 c = chr(i)
61 if c in _repquotefixedmap:
61 if c in _repquotefixedmap:
62 return _repquotefixedmap[c]
62 return _repquotefixedmap[c]
63 if c.isalpha():
63 if c.isalpha():
64 return 'x'
64 return 'x'
65 if c.isdigit():
65 if c.isdigit():
66 return 'n'
66 return 'n'
67 return 'o'
67 return 'o'
68 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
68 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
69
69
70 def repquote(m):
70 def repquote(m):
71 t = m.group('text')
71 t = m.group('text')
72 t = t.translate(_repquotett)
72 t = t.translate(_repquotett)
73 return m.group('quote') + t + m.group('quote')
73 return m.group('quote') + t + m.group('quote')
74
74
def reppython(m):
    """Blank out python source: '#' columns for comments, repquote for strings."""
    comment = m.group('comment')
    if not comment:
        return repquote(m)
    stripped = len(comment.rstrip())
    return "#" * stripped + comment[stripped:]
81
81
def repcomment(m):
    """Replace a shell comment body with '#'s of identical length."""
    indent, body = m.group(1), m.group(2)
    return indent + "#" * len(body)
84
84
def repccomment(m):
    """Blank a C block comment, keeping line structure and the closer."""
    body = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return m.group(1) + body + "*/"
88
88
def repcallspaces(m):
    """Strip indentation from continuation lines inside a call."""
    collapsed = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + collapsed
92
92
def repinclude(m):
    """Replace an #include target with a fixed placeholder."""
    prefix = m.group(1)
    return prefix + "<foo>"
95
95
def rephere(m):
    """Blank the body of a heredoc with 'x' placeholders."""
    blanked = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + blanked
99
99
100
100
# Shell-script checks: [0] hard errors, [1] warnings.
testpats = [
  [
    (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
    (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
    (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
    (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
    (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
    (r'echo -n', "don't use 'echo -n', use printf"),
    (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
    (r'head -c', "don't use 'head -c', use 'dd'"),
    (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
    (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
    (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
     "use egrep for extended grep syntax"),
    (r'/bin/', "don't use explicit paths for tools"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'export .*=', "don't export and assign at once"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
    (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
    (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
    (r'^alias\b.*=', "don't use alias, use a function"),
    (r'if\s*!', "don't use '!' to negate exit status"),
    (r'/dev/u?random', "don't use entropy, use /dev/zero"),
    (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
    (r'^( *)\t', "don't use tabs to indent"),
    (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
     "put a backslash-escaped newline after sed 'i' command"),
    (r'^diff *-\w*[uU].*$\n(^  \$ |^$)', "prefix diff -u/-U with cmp"),
    (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
    (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
    (r'\butil\.Abort\b', "directly use error.Abort"),
    (r'\|&', "don't use |&, use 2>&1"),
    (r'\w =  +\w', "only one space after = allowed"),
    (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
    (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
  ],
  # warnings
  [
    (r'^function', "don't use 'function', use old style"),
    (r'^diff.*-\w*N', "don't use 'diff -N'"),
    (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
    (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
    (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
  ]
]
156
156
# Pre-filters applied to shell tests before pattern matching: blank out
# comments and heredoc bodies so rules don't fire inside them.
testfilters = [
    (r"( *)(#([^\n]*\S)?)", repcomment),
    (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]
161
161
winglobmsg = "use (glob) to match Windows paths too"
# Command prefix of a unified test (".t") file line.
uprefix = r"^  \$ "
# Unified-test checks: [0] hard errors, [1] warnings.
utestpats = [
  [
    (r'^(\S.*||  [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
    (uprefix + r'.*\|\s*sed[^|>\n]*\n',
     "use regex test output patterns instead of sed"),
    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
    (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
    (uprefix + r'.*\|\| echo.*(fail|error)',
     "explicit exit code checks unnecessary"),
    (uprefix + r'set -e', "don't use set -e"),
    (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
    (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
     "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
     '# no-msys'), # in test-pull.t which is skipped on windows
    (r'^  saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
    (r'^  changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
     winglobmsg),
    (r'^  pulling from \$TESTTMP/.*[^)]$', winglobmsg,
     '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
    (r'^  reverting (?!subrepo ).*/.*[^)]$', winglobmsg),
    (r'^  cloning subrepo \S+/.*[^)]$', winglobmsg),
    (r'^  pushing to \$TESTTMP/.*[^)]$', winglobmsg),
    (r'^  pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
    (r'^  moving \S+/.*[^)]$', winglobmsg),
    (r'^  no changes made to subrepo since.*/.*[^)]$', winglobmsg),
    (r'^  .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
    (r'^  .*file://\$TESTTMP',
     'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
    (r'^  (cat|find): .*: No such file or directory',
     'use test -f to test for file existence'),
    (r'^  diff -[^ -]*p',
     "don't use (external) diff with -p for portability"),
    (r'^  [-+][-+][-+] .* [-+]0000 \(glob\)',
     "glob timezone field in diff output for portability"),
    (r'^  @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
     "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"),
    (r'^  @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
     "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"),
    (r'^  @@ -[0-9]+ [+][0-9]+ @@',
     "use '@@ -N* +N* @@ (glob)' style chunk header for portability"),
    (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
     r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
     "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"),
  ],
  # warnings
  [
    (r'^  [^*?/\n]* \(glob\)$',
     "glob match with no glob character (?*/)"),
  ]
]
214
214
# Fold the shell-script checks into the unified-test checks: every pattern
# must now match after the "  $ " / "  > " command prefix of a .t file.
for _section in [0, 1]:
    for _entry in testpats[_section]:
        _pat, _msg = _entry[0], _entry[1]
        if _pat.startswith(r'^'):
            _pat = r"^  [$>] (%s)" % _pat[1:]
        else:
            _pat = r"^  [$>] .*(%s)" % _pat
        utestpats[_section].append((_pat, _msg) + _entry[2:])
224
224
# Pre-filters applied to unified tests: blank heredoc bodies (delimiter is on
# a "  > " continuation line) and comments before pattern matching.
utestfilters = [
    (r"<<(\S+)((.|\n)*?\n  > \1)", rephere),
    (r"( *)(#([^\n]*\S)?)", repcomment),
]
229
229
# Python source checks: [0] hard errors, [1] warnings.
pypats = [
  [
    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
     "tuple parameter unpacking not available in Python 3+"),
    (r'lambda\s*\(.*,.*\)',
     "tuple parameter unpacking not available in Python 3+"),
    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
     'dict-from-generator'),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
    (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
    (r'^\s*\t', "don't use tabs"),
    (r'\S;\s*\n', "semicolon"),
    (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
    (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
    (r'(\w|\)),\w', "missing whitespace after ,"),
    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
    (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
    (r'.{81}', "line too long"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
#    (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
#     "don't use underbars in identifiers"),
    (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
     "don't use camelcase in identifiers"),
    (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
     "linebreak after :"),
    (r'class\s[^( \n]+:', "old-style class, use class foo(object)",
     r'#.*old-style'),
    (r'class\s[^( \n]+\(\):',
     "class foo() creates old style object, use class foo(object)",
     r'#.*old-style'),
    (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist
                            if k not in ('print', 'exec')),
     "Python keyword is not a function"),
    (r',]', "unneeded trailing ',' in list"),
#    (r'class\s[A-Z][^\(]*\((?!Exception)',
#     "don't capitalize non-exception classes"),
#    (r'in range\(', "use xrange"),
#    (r'^\s*print\s+', "avoid using print in core and extensions"),
    (r'[\x80-\xff]', "non-ASCII character literal"),
    (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
     "gratuitous whitespace after Python keyword"),
    (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
#    (r'\s\s=', "gratuitous whitespace before ="),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
     "missing whitespace around operator"),
    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
     "wrong whitespace around ="),
    (r'\([^()]*( =[^=]|[^<>!=]= )',
     "no whitespace around = for named parameters"),
    (r'raise Exception', "don't raise generic exceptions"),
    (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
     "don't use old-style two-argument raise, use Exception(message)"),
    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
    (r' [=!]=\s+(True|False|None)',
     "comparison with singleton, use 'is' or 'is not' instead"),
    (r'^\s*(while|if) [01]:',
     "use True/False for constant Boolean expression"),
    (r'(?:(?<!def)\s+|\()hasattr',
     'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
    (r'opener\([^)]*\).read\(',
     "use opener.read() instead"),
    (r'opener\([^)]*\).write\(',
     "use opener.write() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.read\(',
     "use util.readfile() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.write\(',
     "use util.writefile() instead"),
    (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'[\s\(](open|file)\([^)]*\)\.',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
    (r'\.debug\(\_', "don't mark debug messages for translation"),
    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
    (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
    (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
     'legacy exception syntax; use "as" instead of ","'),
    (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
    (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
    (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
    (r'os\.path\.join\(.*, *(""|\'\')\)',
     "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
    (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
    # XXX only catch mutable arguments on the first line of the definition
    (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
    (r'\butil\.Abort\b', "directly use error.Abort"),
    (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
    (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
    (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
    (r'^import SocketServer', "don't use SockerServer, use util.socketserver"),
    (r'^import urlparse', "don't use urlparse, use util.urlparse"),
    (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
    (r'^import cPickle', "don't use cPickle, use util.pickle"),
    (r'^import pickle', "don't use pickle, use util.pickle"),
    (r'^import httplib', "don't use httplib, use util.httplib"),
    (r'\.next\(\)', "don't use .next(), use next(...)"),

    # rules depending on implementation of repquote()
    (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
     'string join across lines with no space'),
    (r'''(?x)ui\.(status|progress|write|note|warn)\(
         [ \t\n#]*
         (?# any strings/comments might precede a string, which
           # contains translatable message)
         ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
         (?# sequence consisting of below might precede translatable message
           # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
           # - escaped character: "\\", "\n", "\0" ...
           # - character other than '%', 'b' as '\', and 'x' as alphabet)
         (['"]|\'\'\'|""")
         ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
         (?# this regexp can't use [^...] style,
           # because _preparepats forcibly adds "\n" into [^...],
           # even though this regexp wants match it against "\n")''',
     "missing _() in ui message (use () to hide false-positives)"),
  ],
  # warnings
  [
    # rules depending on implementation of repquote()
    (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
  ]
]
360
361
# Pre-filter applied to python sources: blank out comments and string
# literals (via reppython) before the pypats rules run.
pyfilters = [
    (r"""(?msx)(?P<comment>\#.*?$)|
         ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
          (?P<text>(([^\\]|\\.)*?))
          (?P=quote))""", reppython),
]
367
368
# Plain-text (.txt documentation) checks: no filters, two error rules,
# no warnings.
txtfilters = []

txtpats = [
  [
    ('\s$', 'trailing whitespace'),
    ('.. note::[ \n][^\n]', 'add two newlines after note::')
  ],
  []
]
377
378
# C source checks: [0] hard errors, [1] warnings (currently empty).
cpats = [
  [
    (r'//', "don't use //-style comments"),
    (r'^  ', "don't use spaces to indent"),
    (r'\S\t', "don't use tabs except for indent"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
    (r'.{81}', "line too long"),
    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
    (r'return\(', "return is not a function"),
    (r' ;', "no space before ;"),
    (r'[^;] \)', "no space before )"),
    (r'[)][{]', "space between ) and {"),
    (r'\w+\* \w+', "use int *foo, not int* foo"),
    (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
    (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
    (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
    (r'^#\s+\w', "use #foo, not # foo"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'^\s*#import\b', "use only #include in standard C code"),
    (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
    (r'strcat\(', "don't use strcat"),

    # rules depending on implementation of repquote()
  ],
  # warnings
  [
    # rules depending on implementation of repquote()
  ]
]
409
410
410 cfilters = [
411 cfilters = [
411 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
412 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
412 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
413 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
413 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
414 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
414 (r'(\()([^)]+\))', repcallspaces),
415 (r'(\()([^)]+\))', repcallspaces),
415 ]
416 ]
416
417
417 inutilpats = [
418 inutilpats = [
418 [
419 [
419 (r'\bui\.', "don't use ui in util"),
420 (r'\bui\.', "don't use ui in util"),
420 ],
421 ],
421 # warnings
422 # warnings
422 []
423 []
423 ]
424 ]
424
425
425 inrevlogpats = [
426 inrevlogpats = [
426 [
427 [
427 (r'\brepo\.', "don't use repo in revlog"),
428 (r'\brepo\.', "don't use repo in revlog"),
428 ],
429 ],
429 # warnings
430 # warnings
430 []
431 []
431 ]
432 ]
432
433
433 webtemplatefilters = []
434 webtemplatefilters = []
434
435
435 webtemplatepats = [
436 webtemplatepats = [
436 [],
437 [],
437 [
438 [
438 (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
439 (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
439 'follow desc keyword with either firstline or websub'),
440 'follow desc keyword with either firstline or websub'),
440 ]
441 ]
441 ]
442 ]
442
443
443 checks = [
444 checks = [
444 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
445 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
445 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
446 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
446 ('c', r'.*\.[ch]$', '', cfilters, cpats),
447 ('c', r'.*\.[ch]$', '', cfilters, cpats),
447 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
448 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
448 ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
449 ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
449 pyfilters, inrevlogpats),
450 pyfilters, inrevlogpats),
450 ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
451 ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
451 inutilpats),
452 inutilpats),
452 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
453 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
453 ('web template', r'mercurial/templates/.*\.tmpl', '',
454 ('web template', r'mercurial/templates/.*\.tmpl', '',
454 webtemplatefilters, webtemplatepats),
455 webtemplatefilters, webtemplatepats),
455 ]
456 ]
456
457
457 def _preparepats():
458 def _preparepats():
458 for c in checks:
459 for c in checks:
459 failandwarn = c[-1]
460 failandwarn = c[-1]
460 for pats in failandwarn:
461 for pats in failandwarn:
461 for i, pseq in enumerate(pats):
462 for i, pseq in enumerate(pats):
462 # fix-up regexes for multi-line searches
463 # fix-up regexes for multi-line searches
463 p = pseq[0]
464 p = pseq[0]
464 # \s doesn't match \n
465 # \s doesn't match \n
465 p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
466 p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
466 # [^...] doesn't match newline
467 # [^...] doesn't match newline
467 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
468 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
468
469
469 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
470 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
470 filters = c[3]
471 filters = c[3]
471 for i, flt in enumerate(filters):
472 for i, flt in enumerate(filters):
472 filters[i] = re.compile(flt[0]), flt[1]
473 filters[i] = re.compile(flt[0]), flt[1]
473 _preparepats()
474 _preparepats()
474
475
475 class norepeatlogger(object):
476 class norepeatlogger(object):
476 def __init__(self):
477 def __init__(self):
477 self._lastseen = None
478 self._lastseen = None
478
479
479 def log(self, fname, lineno, line, msg, blame):
480 def log(self, fname, lineno, line, msg, blame):
480 """print error related a to given line of a given file.
481 """print error related a to given line of a given file.
481
482
482 The faulty line will also be printed but only once in the case
483 The faulty line will also be printed but only once in the case
483 of multiple errors.
484 of multiple errors.
484
485
485 :fname: filename
486 :fname: filename
486 :lineno: line number
487 :lineno: line number
487 :line: actual content of the line
488 :line: actual content of the line
488 :msg: error message
489 :msg: error message
489 """
490 """
490 msgid = fname, lineno, line
491 msgid = fname, lineno, line
491 if msgid != self._lastseen:
492 if msgid != self._lastseen:
492 if blame:
493 if blame:
493 print("%s:%d (%s):" % (fname, lineno, blame))
494 print("%s:%d (%s):" % (fname, lineno, blame))
494 else:
495 else:
495 print("%s:%d:" % (fname, lineno))
496 print("%s:%d:" % (fname, lineno))
496 print(" > %s" % line)
497 print(" > %s" % line)
497 self._lastseen = msgid
498 self._lastseen = msgid
498 print(" " + msg)
499 print(" " + msg)
499
500
500 _defaultlogger = norepeatlogger()
501 _defaultlogger = norepeatlogger()
501
502
502 def getblame(f):
503 def getblame(f):
503 lines = []
504 lines = []
504 for l in os.popen('hg annotate -un %s' % f):
505 for l in os.popen('hg annotate -un %s' % f):
505 start, line = l.split(':', 1)
506 start, line = l.split(':', 1)
506 user, rev = start.split()
507 user, rev = start.split()
507 lines.append((line[1:-1], user, rev))
508 lines.append((line[1:-1], user, rev))
508 return lines
509 return lines
509
510
510 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
511 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
511 blame=False, debug=False, lineno=True):
512 blame=False, debug=False, lineno=True):
512 """checks style and portability of a given file
513 """checks style and portability of a given file
513
514
514 :f: filepath
515 :f: filepath
515 :logfunc: function used to report error
516 :logfunc: function used to report error
516 logfunc(filename, linenumber, linecontent, errormessage)
517 logfunc(filename, linenumber, linecontent, errormessage)
517 :maxerr: number of error to display before aborting.
518 :maxerr: number of error to display before aborting.
518 Set to false (default) to report all errors
519 Set to false (default) to report all errors
519
520
520 return True if no error is found, False otherwise.
521 return True if no error is found, False otherwise.
521 """
522 """
522 blamecache = None
523 blamecache = None
523 result = True
524 result = True
524
525
525 try:
526 try:
526 with opentext(f) as fp:
527 with opentext(f) as fp:
527 try:
528 try:
528 pre = post = fp.read()
529 pre = post = fp.read()
529 except UnicodeDecodeError as e:
530 except UnicodeDecodeError as e:
530 print("%s while reading %s" % (e, f))
531 print("%s while reading %s" % (e, f))
531 return result
532 return result
532 except IOError as e:
533 except IOError as e:
533 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
534 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
534 return result
535 return result
535
536
536 for name, match, magic, filters, pats in checks:
537 for name, match, magic, filters, pats in checks:
537 if debug:
538 if debug:
538 print(name, f)
539 print(name, f)
539 fc = 0
540 fc = 0
540 if not (re.match(match, f) or (magic and re.search(magic, pre))):
541 if not (re.match(match, f) or (magic and re.search(magic, pre))):
541 if debug:
542 if debug:
542 print("Skipping %s for %s it doesn't match %s" % (
543 print("Skipping %s for %s it doesn't match %s" % (
543 name, match, f))
544 name, match, f))
544 continue
545 continue
545 if "no-" "check-code" in pre:
546 if "no-" "check-code" in pre:
546 # If you're looking at this line, it's because a file has:
547 # If you're looking at this line, it's because a file has:
547 # no- check- code
548 # no- check- code
548 # but the reason to output skipping is to make life for
549 # but the reason to output skipping is to make life for
549 # tests easier. So, instead of writing it with a normal
550 # tests easier. So, instead of writing it with a normal
550 # spelling, we write it with the expected spelling from
551 # spelling, we write it with the expected spelling from
551 # tests/test-check-code.t
552 # tests/test-check-code.t
552 print("Skipping %s it has no-che?k-code (glob)" % f)
553 print("Skipping %s it has no-che?k-code (glob)" % f)
553 return "Skip" # skip checking this file
554 return "Skip" # skip checking this file
554 for p, r in filters:
555 for p, r in filters:
555 post = re.sub(p, r, post)
556 post = re.sub(p, r, post)
556 nerrs = len(pats[0]) # nerr elements are errors
557 nerrs = len(pats[0]) # nerr elements are errors
557 if warnings:
558 if warnings:
558 pats = pats[0] + pats[1]
559 pats = pats[0] + pats[1]
559 else:
560 else:
560 pats = pats[0]
561 pats = pats[0]
561 # print post # uncomment to show filtered version
562 # print post # uncomment to show filtered version
562
563
563 if debug:
564 if debug:
564 print("Checking %s for %s" % (name, f))
565 print("Checking %s for %s" % (name, f))
565
566
566 prelines = None
567 prelines = None
567 errors = []
568 errors = []
568 for i, pat in enumerate(pats):
569 for i, pat in enumerate(pats):
569 if len(pat) == 3:
570 if len(pat) == 3:
570 p, msg, ignore = pat
571 p, msg, ignore = pat
571 else:
572 else:
572 p, msg = pat
573 p, msg = pat
573 ignore = None
574 ignore = None
574 if i >= nerrs:
575 if i >= nerrs:
575 msg = "warning: " + msg
576 msg = "warning: " + msg
576
577
577 pos = 0
578 pos = 0
578 n = 0
579 n = 0
579 for m in p.finditer(post):
580 for m in p.finditer(post):
580 if prelines is None:
581 if prelines is None:
581 prelines = pre.splitlines()
582 prelines = pre.splitlines()
582 postlines = post.splitlines(True)
583 postlines = post.splitlines(True)
583
584
584 start = m.start()
585 start = m.start()
585 while n < len(postlines):
586 while n < len(postlines):
586 step = len(postlines[n])
587 step = len(postlines[n])
587 if pos + step > start:
588 if pos + step > start:
588 break
589 break
589 pos += step
590 pos += step
590 n += 1
591 n += 1
591 l = prelines[n]
592 l = prelines[n]
592
593
593 if ignore and re.search(ignore, l, re.MULTILINE):
594 if ignore and re.search(ignore, l, re.MULTILINE):
594 if debug:
595 if debug:
595 print("Skipping %s for %s:%s (ignore pattern)" % (
596 print("Skipping %s for %s:%s (ignore pattern)" % (
596 name, f, n))
597 name, f, n))
597 continue
598 continue
598 bd = ""
599 bd = ""
599 if blame:
600 if blame:
600 bd = 'working directory'
601 bd = 'working directory'
601 if not blamecache:
602 if not blamecache:
602 blamecache = getblame(f)
603 blamecache = getblame(f)
603 if n < len(blamecache):
604 if n < len(blamecache):
604 bl, bu, br = blamecache[n]
605 bl, bu, br = blamecache[n]
605 if bl == l:
606 if bl == l:
606 bd = '%s@%s' % (bu, br)
607 bd = '%s@%s' % (bu, br)
607
608
608 errors.append((f, lineno and n + 1, l, msg, bd))
609 errors.append((f, lineno and n + 1, l, msg, bd))
609 result = False
610 result = False
610
611
611 errors.sort()
612 errors.sort()
612 for e in errors:
613 for e in errors:
613 logfunc(*e)
614 logfunc(*e)
614 fc += 1
615 fc += 1
615 if maxerr and fc >= maxerr:
616 if maxerr and fc >= maxerr:
616 print(" (too many errors, giving up)")
617 print(" (too many errors, giving up)")
617 break
618 break
618
619
619 return result
620 return result
620
621
621 if __name__ == "__main__":
622 if __name__ == "__main__":
622 parser = optparse.OptionParser("%prog [options] [files]")
623 parser = optparse.OptionParser("%prog [options] [files]")
623 parser.add_option("-w", "--warnings", action="store_true",
624 parser.add_option("-w", "--warnings", action="store_true",
624 help="include warning-level checks")
625 help="include warning-level checks")
625 parser.add_option("-p", "--per-file", type="int",
626 parser.add_option("-p", "--per-file", type="int",
626 help="max warnings per file")
627 help="max warnings per file")
627 parser.add_option("-b", "--blame", action="store_true",
628 parser.add_option("-b", "--blame", action="store_true",
628 help="use annotate to generate blame info")
629 help="use annotate to generate blame info")
629 parser.add_option("", "--debug", action="store_true",
630 parser.add_option("", "--debug", action="store_true",
630 help="show debug information")
631 help="show debug information")
631 parser.add_option("", "--nolineno", action="store_false",
632 parser.add_option("", "--nolineno", action="store_false",
632 dest='lineno', help="don't show line numbers")
633 dest='lineno', help="don't show line numbers")
633
634
634 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
635 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
635 lineno=True)
636 lineno=True)
636 (options, args) = parser.parse_args()
637 (options, args) = parser.parse_args()
637
638
638 if len(args) == 0:
639 if len(args) == 0:
639 check = glob.glob("*")
640 check = glob.glob("*")
640 else:
641 else:
641 check = args
642 check = args
642
643
643 ret = 0
644 ret = 0
644 for f in check:
645 for f in check:
645 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
646 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
646 blame=options.blame, debug=options.debug,
647 blame=options.blame, debug=options.debug,
647 lineno=options.lineno):
648 lineno=options.lineno):
648 ret = 1
649 ret = 1
649 sys.exit(ret)
650 sys.exit(ret)
@@ -1,308 +1,308
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import httplib
13 import os
12 import os
14 import socket
13 import socket
15 import tempfile
14 import tempfile
16 import zlib
15 import zlib
17
16
18 from .i18n import _
17 from .i18n import _
19 from .node import nullid
18 from .node import nullid
20 from . import (
19 from . import (
21 bundle2,
20 bundle2,
22 error,
21 error,
23 httpconnection,
22 httpconnection,
24 statichttprepo,
23 statichttprepo,
25 url,
24 url,
26 util,
25 util,
27 wireproto,
26 wireproto,
28 )
27 )
29
28
29 httplib = util.httplib
30 urlerr = util.urlerr
30 urlerr = util.urlerr
31 urlreq = util.urlreq
31 urlreq = util.urlreq
32
32
33 def zgenerator(f):
33 def zgenerator(f):
34 zd = zlib.decompressobj()
34 zd = zlib.decompressobj()
35 try:
35 try:
36 for chunk in util.filechunkiter(f):
36 for chunk in util.filechunkiter(f):
37 while chunk:
37 while chunk:
38 yield zd.decompress(chunk, 2**18)
38 yield zd.decompress(chunk, 2**18)
39 chunk = zd.unconsumed_tail
39 chunk = zd.unconsumed_tail
40 except httplib.HTTPException:
40 except httplib.HTTPException:
41 raise IOError(None, _('connection ended unexpectedly'))
41 raise IOError(None, _('connection ended unexpectedly'))
42 yield zd.flush()
42 yield zd.flush()
43
43
44 class httppeer(wireproto.wirepeer):
44 class httppeer(wireproto.wirepeer):
45 def __init__(self, ui, path):
45 def __init__(self, ui, path):
46 self.path = path
46 self.path = path
47 self.caps = None
47 self.caps = None
48 self.handler = None
48 self.handler = None
49 self.urlopener = None
49 self.urlopener = None
50 self.requestbuilder = None
50 self.requestbuilder = None
51 u = util.url(path)
51 u = util.url(path)
52 if u.query or u.fragment:
52 if u.query or u.fragment:
53 raise error.Abort(_('unsupported URL component: "%s"') %
53 raise error.Abort(_('unsupported URL component: "%s"') %
54 (u.query or u.fragment))
54 (u.query or u.fragment))
55
55
56 # urllib cannot handle URLs with embedded user or passwd
56 # urllib cannot handle URLs with embedded user or passwd
57 self._url, authinfo = u.authinfo()
57 self._url, authinfo = u.authinfo()
58
58
59 self.ui = ui
59 self.ui = ui
60 self.ui.debug('using %s\n' % self._url)
60 self.ui.debug('using %s\n' % self._url)
61
61
62 self.urlopener = url.opener(ui, authinfo)
62 self.urlopener = url.opener(ui, authinfo)
63 self.requestbuilder = urlreq.request
63 self.requestbuilder = urlreq.request
64
64
65 def __del__(self):
65 def __del__(self):
66 if self.urlopener:
66 if self.urlopener:
67 for h in self.urlopener.handlers:
67 for h in self.urlopener.handlers:
68 h.close()
68 h.close()
69 getattr(h, "close_all", lambda : None)()
69 getattr(h, "close_all", lambda : None)()
70
70
71 def url(self):
71 def url(self):
72 return self.path
72 return self.path
73
73
74 # look up capabilities only when needed
74 # look up capabilities only when needed
75
75
76 def _fetchcaps(self):
76 def _fetchcaps(self):
77 self.caps = set(self._call('capabilities').split())
77 self.caps = set(self._call('capabilities').split())
78
78
79 def _capabilities(self):
79 def _capabilities(self):
80 if self.caps is None:
80 if self.caps is None:
81 try:
81 try:
82 self._fetchcaps()
82 self._fetchcaps()
83 except error.RepoError:
83 except error.RepoError:
84 self.caps = set()
84 self.caps = set()
85 self.ui.debug('capabilities: %s\n' %
85 self.ui.debug('capabilities: %s\n' %
86 (' '.join(self.caps or ['none'])))
86 (' '.join(self.caps or ['none'])))
87 return self.caps
87 return self.caps
88
88
89 def lock(self):
89 def lock(self):
90 raise error.Abort(_('operation not supported over http'))
90 raise error.Abort(_('operation not supported over http'))
91
91
92 def _callstream(self, cmd, **args):
92 def _callstream(self, cmd, **args):
93 if cmd == 'pushkey':
93 if cmd == 'pushkey':
94 args['data'] = ''
94 args['data'] = ''
95 data = args.pop('data', None)
95 data = args.pop('data', None)
96 headers = args.pop('headers', {})
96 headers = args.pop('headers', {})
97
97
98 self.ui.debug("sending %s command\n" % cmd)
98 self.ui.debug("sending %s command\n" % cmd)
99 q = [('cmd', cmd)]
99 q = [('cmd', cmd)]
100 headersize = 0
100 headersize = 0
101 # Important: don't use self.capable() here or else you end up
101 # Important: don't use self.capable() here or else you end up
102 # with infinite recursion when trying to look up capabilities
102 # with infinite recursion when trying to look up capabilities
103 # for the first time.
103 # for the first time.
104 postargsok = self.caps is not None and 'httppostargs' in self.caps
104 postargsok = self.caps is not None and 'httppostargs' in self.caps
105 # TODO: support for httppostargs when data is a file-like
105 # TODO: support for httppostargs when data is a file-like
106 # object rather than a basestring
106 # object rather than a basestring
107 canmungedata = not data or isinstance(data, basestring)
107 canmungedata = not data or isinstance(data, basestring)
108 if postargsok and canmungedata:
108 if postargsok and canmungedata:
109 strargs = urlreq.urlencode(sorted(args.items()))
109 strargs = urlreq.urlencode(sorted(args.items()))
110 if strargs:
110 if strargs:
111 if not data:
111 if not data:
112 data = strargs
112 data = strargs
113 elif isinstance(data, basestring):
113 elif isinstance(data, basestring):
114 data = strargs + data
114 data = strargs + data
115 headers['X-HgArgs-Post'] = len(strargs)
115 headers['X-HgArgs-Post'] = len(strargs)
116 else:
116 else:
117 if len(args) > 0:
117 if len(args) > 0:
118 httpheader = self.capable('httpheader')
118 httpheader = self.capable('httpheader')
119 if httpheader:
119 if httpheader:
120 headersize = int(httpheader.split(',', 1)[0])
120 headersize = int(httpheader.split(',', 1)[0])
121 if headersize > 0:
121 if headersize > 0:
122 # The headers can typically carry more data than the URL.
122 # The headers can typically carry more data than the URL.
123 encargs = urlreq.urlencode(sorted(args.items()))
123 encargs = urlreq.urlencode(sorted(args.items()))
124 headerfmt = 'X-HgArg-%s'
124 headerfmt = 'X-HgArg-%s'
125 contentlen = headersize - len(headerfmt % '000' + ': \r\n')
125 contentlen = headersize - len(headerfmt % '000' + ': \r\n')
126 headernum = 0
126 headernum = 0
127 varyheaders = []
127 varyheaders = []
128 for i in xrange(0, len(encargs), contentlen):
128 for i in xrange(0, len(encargs), contentlen):
129 headernum += 1
129 headernum += 1
130 header = headerfmt % str(headernum)
130 header = headerfmt % str(headernum)
131 headers[header] = encargs[i:i + contentlen]
131 headers[header] = encargs[i:i + contentlen]
132 varyheaders.append(header)
132 varyheaders.append(header)
133 headers['Vary'] = ','.join(varyheaders)
133 headers['Vary'] = ','.join(varyheaders)
134 else:
134 else:
135 q += sorted(args.items())
135 q += sorted(args.items())
136 qs = '?%s' % urlreq.urlencode(q)
136 qs = '?%s' % urlreq.urlencode(q)
137 cu = "%s%s" % (self._url, qs)
137 cu = "%s%s" % (self._url, qs)
138 size = 0
138 size = 0
139 if util.safehasattr(data, 'length'):
139 if util.safehasattr(data, 'length'):
140 size = data.length
140 size = data.length
141 elif data is not None:
141 elif data is not None:
142 size = len(data)
142 size = len(data)
143 if size and self.ui.configbool('ui', 'usehttp2', False):
143 if size and self.ui.configbool('ui', 'usehttp2', False):
144 headers['Expect'] = '100-Continue'
144 headers['Expect'] = '100-Continue'
145 headers['X-HgHttp2'] = '1'
145 headers['X-HgHttp2'] = '1'
146 if data is not None and 'Content-Type' not in headers:
146 if data is not None and 'Content-Type' not in headers:
147 headers['Content-Type'] = 'application/mercurial-0.1'
147 headers['Content-Type'] = 'application/mercurial-0.1'
148 req = self.requestbuilder(cu, data, headers)
148 req = self.requestbuilder(cu, data, headers)
149 if data is not None:
149 if data is not None:
150 self.ui.debug("sending %s bytes\n" % size)
150 self.ui.debug("sending %s bytes\n" % size)
151 req.add_unredirected_header('Content-Length', '%d' % size)
151 req.add_unredirected_header('Content-Length', '%d' % size)
152 try:
152 try:
153 resp = self.urlopener.open(req)
153 resp = self.urlopener.open(req)
154 except urlerr.httperror as inst:
154 except urlerr.httperror as inst:
155 if inst.code == 401:
155 if inst.code == 401:
156 raise error.Abort(_('authorization failed'))
156 raise error.Abort(_('authorization failed'))
157 raise
157 raise
158 except httplib.HTTPException as inst:
158 except httplib.HTTPException as inst:
159 self.ui.debug('http error while sending %s command\n' % cmd)
159 self.ui.debug('http error while sending %s command\n' % cmd)
160 self.ui.traceback()
160 self.ui.traceback()
161 raise IOError(None, inst)
161 raise IOError(None, inst)
162 except IndexError:
162 except IndexError:
163 # this only happens with Python 2.3, later versions raise URLError
163 # this only happens with Python 2.3, later versions raise URLError
164 raise error.Abort(_('http error, possibly caused by proxy setting'))
164 raise error.Abort(_('http error, possibly caused by proxy setting'))
165 # record the url we got redirected to
165 # record the url we got redirected to
166 resp_url = resp.geturl()
166 resp_url = resp.geturl()
167 if resp_url.endswith(qs):
167 if resp_url.endswith(qs):
168 resp_url = resp_url[:-len(qs)]
168 resp_url = resp_url[:-len(qs)]
169 if self._url.rstrip('/') != resp_url.rstrip('/'):
169 if self._url.rstrip('/') != resp_url.rstrip('/'):
170 if not self.ui.quiet:
170 if not self.ui.quiet:
171 self.ui.warn(_('real URL is %s\n') % resp_url)
171 self.ui.warn(_('real URL is %s\n') % resp_url)
172 self._url = resp_url
172 self._url = resp_url
173 try:
173 try:
174 proto = resp.getheader('content-type')
174 proto = resp.getheader('content-type')
175 except AttributeError:
175 except AttributeError:
176 proto = resp.headers.get('content-type', '')
176 proto = resp.headers.get('content-type', '')
177
177
178 safeurl = util.hidepassword(self._url)
178 safeurl = util.hidepassword(self._url)
179 if proto.startswith('application/hg-error'):
179 if proto.startswith('application/hg-error'):
180 raise error.OutOfBandError(resp.read())
180 raise error.OutOfBandError(resp.read())
181 # accept old "text/plain" and "application/hg-changegroup" for now
181 # accept old "text/plain" and "application/hg-changegroup" for now
182 if not (proto.startswith('application/mercurial-') or
182 if not (proto.startswith('application/mercurial-') or
183 (proto.startswith('text/plain')
183 (proto.startswith('text/plain')
184 and not resp.headers.get('content-length')) or
184 and not resp.headers.get('content-length')) or
185 proto.startswith('application/hg-changegroup')):
185 proto.startswith('application/hg-changegroup')):
186 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
186 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
187 raise error.RepoError(
187 raise error.RepoError(
188 _("'%s' does not appear to be an hg repository:\n"
188 _("'%s' does not appear to be an hg repository:\n"
189 "---%%<--- (%s)\n%s\n---%%<---\n")
189 "---%%<--- (%s)\n%s\n---%%<---\n")
190 % (safeurl, proto or 'no content-type', resp.read(1024)))
190 % (safeurl, proto or 'no content-type', resp.read(1024)))
191
191
192 if proto.startswith('application/mercurial-'):
192 if proto.startswith('application/mercurial-'):
193 try:
193 try:
194 version = proto.split('-', 1)[1]
194 version = proto.split('-', 1)[1]
195 version_info = tuple([int(n) for n in version.split('.')])
195 version_info = tuple([int(n) for n in version.split('.')])
196 except ValueError:
196 except ValueError:
197 raise error.RepoError(_("'%s' sent a broken Content-Type "
197 raise error.RepoError(_("'%s' sent a broken Content-Type "
198 "header (%s)") % (safeurl, proto))
198 "header (%s)") % (safeurl, proto))
199 if version_info > (0, 1):
199 if version_info > (0, 1):
200 raise error.RepoError(_("'%s' uses newer protocol %s") %
200 raise error.RepoError(_("'%s' uses newer protocol %s") %
201 (safeurl, version))
201 (safeurl, version))
202
202
203 return resp
203 return resp
204
204
205 def _call(self, cmd, **args):
205 def _call(self, cmd, **args):
206 fp = self._callstream(cmd, **args)
206 fp = self._callstream(cmd, **args)
207 try:
207 try:
208 return fp.read()
208 return fp.read()
209 finally:
209 finally:
210 # if using keepalive, allow connection to be reused
210 # if using keepalive, allow connection to be reused
211 fp.close()
211 fp.close()
212
212
213 def _callpush(self, cmd, cg, **args):
213 def _callpush(self, cmd, cg, **args):
214 # have to stream bundle to a temp file because we do not have
214 # have to stream bundle to a temp file because we do not have
215 # http 1.1 chunked transfer.
215 # http 1.1 chunked transfer.
216
216
217 types = self.capable('unbundle')
217 types = self.capable('unbundle')
218 try:
218 try:
219 types = types.split(',')
219 types = types.split(',')
220 except AttributeError:
220 except AttributeError:
221 # servers older than d1b16a746db6 will send 'unbundle' as a
221 # servers older than d1b16a746db6 will send 'unbundle' as a
222 # boolean capability. They only support headerless/uncompressed
222 # boolean capability. They only support headerless/uncompressed
223 # bundles.
223 # bundles.
224 types = [""]
224 types = [""]
225 for x in types:
225 for x in types:
226 if x in bundle2.bundletypes:
226 if x in bundle2.bundletypes:
227 type = x
227 type = x
228 break
228 break
229
229
230 tempname = bundle2.writebundle(self.ui, cg, None, type)
230 tempname = bundle2.writebundle(self.ui, cg, None, type)
231 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
231 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
232 headers = {'Content-Type': 'application/mercurial-0.1'}
232 headers = {'Content-Type': 'application/mercurial-0.1'}
233
233
234 try:
234 try:
235 r = self._call(cmd, data=fp, headers=headers, **args)
235 r = self._call(cmd, data=fp, headers=headers, **args)
236 vals = r.split('\n', 1)
236 vals = r.split('\n', 1)
237 if len(vals) < 2:
237 if len(vals) < 2:
238 raise error.ResponseError(_("unexpected response:"), r)
238 raise error.ResponseError(_("unexpected response:"), r)
239 return vals
239 return vals
240 except socket.error as err:
240 except socket.error as err:
241 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
241 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
242 raise error.Abort(_('push failed: %s') % err.args[1])
242 raise error.Abort(_('push failed: %s') % err.args[1])
243 raise error.Abort(err.args[1])
243 raise error.Abort(err.args[1])
244 finally:
244 finally:
245 fp.close()
245 fp.close()
246 os.unlink(tempname)
246 os.unlink(tempname)
247
247
248 def _calltwowaystream(self, cmd, fp, **args):
248 def _calltwowaystream(self, cmd, fp, **args):
249 fh = None
249 fh = None
250 fp_ = None
250 fp_ = None
251 filename = None
251 filename = None
252 try:
252 try:
253 # dump bundle to disk
253 # dump bundle to disk
254 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
254 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
255 fh = os.fdopen(fd, "wb")
255 fh = os.fdopen(fd, "wb")
256 d = fp.read(4096)
256 d = fp.read(4096)
257 while d:
257 while d:
258 fh.write(d)
258 fh.write(d)
259 d = fp.read(4096)
259 d = fp.read(4096)
260 fh.close()
260 fh.close()
261 # start http push
261 # start http push
262 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
262 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
263 headers = {'Content-Type': 'application/mercurial-0.1'}
263 headers = {'Content-Type': 'application/mercurial-0.1'}
264 return self._callstream(cmd, data=fp_, headers=headers, **args)
264 return self._callstream(cmd, data=fp_, headers=headers, **args)
265 finally:
265 finally:
266 if fp_ is not None:
266 if fp_ is not None:
267 fp_.close()
267 fp_.close()
268 if fh is not None:
268 if fh is not None:
269 fh.close()
269 fh.close()
270 os.unlink(filename)
270 os.unlink(filename)
271
271
272 def _callcompressable(self, cmd, **args):
272 def _callcompressable(self, cmd, **args):
273 stream = self._callstream(cmd, **args)
273 stream = self._callstream(cmd, **args)
274 return util.chunkbuffer(zgenerator(stream))
274 return util.chunkbuffer(zgenerator(stream))
275
275
276 def _abort(self, exception):
276 def _abort(self, exception):
277 raise exception
277 raise exception
278
278
279 class httpspeer(httppeer):
279 class httpspeer(httppeer):
280 def __init__(self, ui, path):
280 def __init__(self, ui, path):
281 if not url.has_https:
281 if not url.has_https:
282 raise error.Abort(_('Python support for SSL and HTTPS '
282 raise error.Abort(_('Python support for SSL and HTTPS '
283 'is not installed'))
283 'is not installed'))
284 httppeer.__init__(self, ui, path)
284 httppeer.__init__(self, ui, path)
285
285
286 def instance(ui, path, create):
286 def instance(ui, path, create):
287 if create:
287 if create:
288 raise error.Abort(_('cannot create new http repository'))
288 raise error.Abort(_('cannot create new http repository'))
289 try:
289 try:
290 if path.startswith('https:'):
290 if path.startswith('https:'):
291 inst = httpspeer(ui, path)
291 inst = httpspeer(ui, path)
292 else:
292 else:
293 inst = httppeer(ui, path)
293 inst = httppeer(ui, path)
294 try:
294 try:
295 # Try to do useful work when checking compatibility.
295 # Try to do useful work when checking compatibility.
296 # Usually saves a roundtrip since we want the caps anyway.
296 # Usually saves a roundtrip since we want the caps anyway.
297 inst._fetchcaps()
297 inst._fetchcaps()
298 except error.RepoError:
298 except error.RepoError:
299 # No luck, try older compatibility check.
299 # No luck, try older compatibility check.
300 inst.between([(nullid, nullid)])
300 inst.between([(nullid, nullid)])
301 return inst
301 return inst
302 except error.RepoError as httpexception:
302 except error.RepoError as httpexception:
303 try:
303 try:
304 r = statichttprepo.instance(ui, "static-" + path, create)
304 r = statichttprepo.instance(ui, "static-" + path, create)
305 ui.note(_('(falling back to static-http)\n'))
305 ui.note(_('(falling back to static-http)\n'))
306 return r
306 return r
307 except error.RepoError:
307 except error.RepoError:
308 raise httpexception # use the original http RepoError instead
308 raise httpexception # use the original http RepoError instead
@@ -1,759 +1,759
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, see
12 # License along with this library; if not, see
13 # <http://www.gnu.org/licenses/>.
13 # <http://www.gnu.org/licenses/>.
14
14
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17
17
18 # Modified by Benoit Boissinot:
18 # Modified by Benoit Boissinot:
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 # Modified by Dirkjan Ochtman:
20 # Modified by Dirkjan Ochtman:
21 # - import md5 function from a local util module
21 # - import md5 function from a local util module
22 # Modified by Augie Fackler:
22 # Modified by Augie Fackler:
23 # - add safesend method and use it to prevent broken pipe errors
23 # - add safesend method and use it to prevent broken pipe errors
24 # on large POST requests
24 # on large POST requests
25
25
26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
27
27
28 >>> import urllib2
28 >>> import urllib2
29 >>> from keepalive import HTTPHandler
29 >>> from keepalive import HTTPHandler
30 >>> keepalive_handler = HTTPHandler()
30 >>> keepalive_handler = HTTPHandler()
31 >>> opener = urlreq.buildopener(keepalive_handler)
31 >>> opener = urlreq.buildopener(keepalive_handler)
32 >>> urlreq.installopener(opener)
32 >>> urlreq.installopener(opener)
33 >>>
33 >>>
34 >>> fo = urlreq.urlopen('http://www.python.org')
34 >>> fo = urlreq.urlopen('http://www.python.org')
35
35
36 If a connection to a given host is requested, and all of the existing
36 If a connection to a given host is requested, and all of the existing
37 connections are still in use, another connection will be opened. If
37 connections are still in use, another connection will be opened. If
38 the handler tries to use an existing connection but it fails in some
38 the handler tries to use an existing connection but it fails in some
39 way, it will be closed and removed from the pool.
39 way, it will be closed and removed from the pool.
40
40
41 To remove the handler, simply re-run build_opener with no arguments, and
41 To remove the handler, simply re-run build_opener with no arguments, and
42 install that opener.
42 install that opener.
43
43
44 You can explicitly close connections by using the close_connection()
44 You can explicitly close connections by using the close_connection()
45 method of the returned file-like object (described below) or you can
45 method of the returned file-like object (described below) or you can
46 use the handler methods:
46 use the handler methods:
47
47
48 close_connection(host)
48 close_connection(host)
49 close_all()
49 close_all()
50 open_connections()
50 open_connections()
51
51
52 NOTE: using the close_connection and close_all methods of the handler
52 NOTE: using the close_connection and close_all methods of the handler
53 should be done with care when using multiple threads.
53 should be done with care when using multiple threads.
54 * there is nothing that prevents another thread from creating new
54 * there is nothing that prevents another thread from creating new
55 connections immediately after connections are closed
55 connections immediately after connections are closed
56 * no checks are done to prevent in-use connections from being closed
56 * no checks are done to prevent in-use connections from being closed
57
57
58 >>> keepalive_handler.close_all()
58 >>> keepalive_handler.close_all()
59
59
60 EXTRA ATTRIBUTES AND METHODS
60 EXTRA ATTRIBUTES AND METHODS
61
61
62 Upon a status of 200, the object returned has a few additional
62 Upon a status of 200, the object returned has a few additional
63 attributes and methods, which should not be used if you want to
63 attributes and methods, which should not be used if you want to
64 remain consistent with the normal urllib2-returned objects:
64 remain consistent with the normal urllib2-returned objects:
65
65
66 close_connection() - close the connection to the host
66 close_connection() - close the connection to the host
67 readlines() - you know, readlines()
67 readlines() - you know, readlines()
68 status - the return status (i.e. 404)
68 status - the return status (i.e. 404)
69 reason - english translation of status (i.e. 'File not found')
69 reason - english translation of status (i.e. 'File not found')
70
70
71 If you want the best of both worlds, use this inside an
71 If you want the best of both worlds, use this inside an
72 AttributeError-catching try:
72 AttributeError-catching try:
73
73
74 >>> try: status = fo.status
74 >>> try: status = fo.status
75 >>> except AttributeError: status = None
75 >>> except AttributeError: status = None
76
76
77 Unfortunately, these are ONLY there if status == 200, so it's not
77 Unfortunately, these are ONLY there if status == 200, so it's not
78 easy to distinguish between non-200 responses. The reason is that
78 easy to distinguish between non-200 responses. The reason is that
79 urllib2 tries to do clever things with error codes 301, 302, 401,
79 urllib2 tries to do clever things with error codes 301, 302, 401,
80 and 407, and it wraps the object upon return.
80 and 407, and it wraps the object upon return.
81
81
82 For python versions earlier than 2.4, you can avoid this fancy error
82 For python versions earlier than 2.4, you can avoid this fancy error
83 handling by setting the module-level global HANDLE_ERRORS to zero.
83 handling by setting the module-level global HANDLE_ERRORS to zero.
84 You see, prior to 2.4, it's the HTTP Handler's job to determine what
84 You see, prior to 2.4, it's the HTTP Handler's job to determine what
85 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
85 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
86 means "pass everything up". In python 2.4, however, this job no
86 means "pass everything up". In python 2.4, however, this job no
87 longer belongs to the HTTP Handler and is now done by a NEW handler,
87 longer belongs to the HTTP Handler and is now done by a NEW handler,
88 HTTPErrorProcessor. Here's the bottom line:
88 HTTPErrorProcessor. Here's the bottom line:
89
89
90 python version < 2.4
90 python version < 2.4
91 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
91 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
92 errors
92 errors
93 HANDLE_ERRORS == 0 pass everything up, error processing is
93 HANDLE_ERRORS == 0 pass everything up, error processing is
94 left to the calling code
94 left to the calling code
95 python version >= 2.4
95 python version >= 2.4
96 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
96 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
97 HANDLE_ERRORS == 0 (default) pass everything up, let the
97 HANDLE_ERRORS == 0 (default) pass everything up, let the
98 other handlers (specifically,
98 other handlers (specifically,
99 HTTPErrorProcessor) decide what to do
99 HTTPErrorProcessor) decide what to do
100
100
101 In practice, setting the variable either way makes little difference
101 In practice, setting the variable either way makes little difference
102 in python 2.4, so for the most consistent behavior across versions,
102 in python 2.4, so for the most consistent behavior across versions,
103 you probably just want to use the defaults, which will give you
103 you probably just want to use the defaults, which will give you
104 exceptions on errors.
104 exceptions on errors.
105
105
106 """
106 """
107
107
108 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
108 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
109
109
110 from __future__ import absolute_import, print_function
110 from __future__ import absolute_import, print_function
111
111
112 import errno
112 import errno
113 import hashlib
113 import hashlib
114 import httplib
115 import socket
114 import socket
116 import sys
115 import sys
117 import thread
116 import thread
118
117
119 from . import (
118 from . import (
120 util,
119 util,
121 )
120 )
122
121
122 httplib = util.httplib
123 urlerr = util.urlerr
123 urlerr = util.urlerr
124 urlreq = util.urlreq
124 urlreq = util.urlreq
125
125
126 DEBUG = None
126 DEBUG = None
127
127
128 if sys.version_info < (2, 4):
128 if sys.version_info < (2, 4):
129 HANDLE_ERRORS = 1
129 HANDLE_ERRORS = 1
130 else: HANDLE_ERRORS = 0
130 else: HANDLE_ERRORS = 0
131
131
132 class ConnectionManager(object):
132 class ConnectionManager(object):
133 """
133 """
134 The connection manager must be able to:
134 The connection manager must be able to:
135 * keep track of all existing
135 * keep track of all existing
136 """
136 """
137 def __init__(self):
137 def __init__(self):
138 self._lock = thread.allocate_lock()
138 self._lock = thread.allocate_lock()
139 self._hostmap = {} # map hosts to a list of connections
139 self._hostmap = {} # map hosts to a list of connections
140 self._connmap = {} # map connections to host
140 self._connmap = {} # map connections to host
141 self._readymap = {} # map connection to ready state
141 self._readymap = {} # map connection to ready state
142
142
143 def add(self, host, connection, ready):
143 def add(self, host, connection, ready):
144 self._lock.acquire()
144 self._lock.acquire()
145 try:
145 try:
146 if host not in self._hostmap:
146 if host not in self._hostmap:
147 self._hostmap[host] = []
147 self._hostmap[host] = []
148 self._hostmap[host].append(connection)
148 self._hostmap[host].append(connection)
149 self._connmap[connection] = host
149 self._connmap[connection] = host
150 self._readymap[connection] = ready
150 self._readymap[connection] = ready
151 finally:
151 finally:
152 self._lock.release()
152 self._lock.release()
153
153
154 def remove(self, connection):
154 def remove(self, connection):
155 self._lock.acquire()
155 self._lock.acquire()
156 try:
156 try:
157 try:
157 try:
158 host = self._connmap[connection]
158 host = self._connmap[connection]
159 except KeyError:
159 except KeyError:
160 pass
160 pass
161 else:
161 else:
162 del self._connmap[connection]
162 del self._connmap[connection]
163 del self._readymap[connection]
163 del self._readymap[connection]
164 self._hostmap[host].remove(connection)
164 self._hostmap[host].remove(connection)
165 if not self._hostmap[host]: del self._hostmap[host]
165 if not self._hostmap[host]: del self._hostmap[host]
166 finally:
166 finally:
167 self._lock.release()
167 self._lock.release()
168
168
169 def set_ready(self, connection, ready):
169 def set_ready(self, connection, ready):
170 try:
170 try:
171 self._readymap[connection] = ready
171 self._readymap[connection] = ready
172 except KeyError:
172 except KeyError:
173 pass
173 pass
174
174
175 def get_ready_conn(self, host):
175 def get_ready_conn(self, host):
176 conn = None
176 conn = None
177 self._lock.acquire()
177 self._lock.acquire()
178 try:
178 try:
179 if host in self._hostmap:
179 if host in self._hostmap:
180 for c in self._hostmap[host]:
180 for c in self._hostmap[host]:
181 if self._readymap[c]:
181 if self._readymap[c]:
182 self._readymap[c] = 0
182 self._readymap[c] = 0
183 conn = c
183 conn = c
184 break
184 break
185 finally:
185 finally:
186 self._lock.release()
186 self._lock.release()
187 return conn
187 return conn
188
188
189 def get_all(self, host=None):
189 def get_all(self, host=None):
190 if host:
190 if host:
191 return list(self._hostmap.get(host, []))
191 return list(self._hostmap.get(host, []))
192 else:
192 else:
193 return dict(self._hostmap)
193 return dict(self._hostmap)
194
194
195 class KeepAliveHandler(object):
195 class KeepAliveHandler(object):
196 def __init__(self):
196 def __init__(self):
197 self._cm = ConnectionManager()
197 self._cm = ConnectionManager()
198
198
199 #### Connection Management
199 #### Connection Management
200 def open_connections(self):
200 def open_connections(self):
201 """return a list of connected hosts and the number of connections
201 """return a list of connected hosts and the number of connections
202 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
202 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
203 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
203 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
204
204
205 def close_connection(self, host):
205 def close_connection(self, host):
206 """close connection(s) to <host>
206 """close connection(s) to <host>
207 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
207 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
208 no error occurs if there is no connection to that host."""
208 no error occurs if there is no connection to that host."""
209 for h in self._cm.get_all(host):
209 for h in self._cm.get_all(host):
210 self._cm.remove(h)
210 self._cm.remove(h)
211 h.close()
211 h.close()
212
212
213 def close_all(self):
213 def close_all(self):
214 """close all open connections"""
214 """close all open connections"""
215 for host, conns in self._cm.get_all().iteritems():
215 for host, conns in self._cm.get_all().iteritems():
216 for h in conns:
216 for h in conns:
217 self._cm.remove(h)
217 self._cm.remove(h)
218 h.close()
218 h.close()
219
219
220 def _request_closed(self, request, host, connection):
220 def _request_closed(self, request, host, connection):
221 """tells us that this request is now closed and that the
221 """tells us that this request is now closed and that the
222 connection is ready for another request"""
222 connection is ready for another request"""
223 self._cm.set_ready(connection, 1)
223 self._cm.set_ready(connection, 1)
224
224
225 def _remove_connection(self, host, connection, close=0):
225 def _remove_connection(self, host, connection, close=0):
226 if close:
226 if close:
227 connection.close()
227 connection.close()
228 self._cm.remove(connection)
228 self._cm.remove(connection)
229
229
230 #### Transaction Execution
230 #### Transaction Execution
231 def http_open(self, req):
231 def http_open(self, req):
232 return self.do_open(HTTPConnection, req)
232 return self.do_open(HTTPConnection, req)
233
233
234 def do_open(self, http_class, req):
234 def do_open(self, http_class, req):
235 host = req.get_host()
235 host = req.get_host()
236 if not host:
236 if not host:
237 raise urlerr.urlerror('no host given')
237 raise urlerr.urlerror('no host given')
238
238
239 try:
239 try:
240 h = self._cm.get_ready_conn(host)
240 h = self._cm.get_ready_conn(host)
241 while h:
241 while h:
242 r = self._reuse_connection(h, req, host)
242 r = self._reuse_connection(h, req, host)
243
243
244 # if this response is non-None, then it worked and we're
244 # if this response is non-None, then it worked and we're
245 # done. Break out, skipping the else block.
245 # done. Break out, skipping the else block.
246 if r:
246 if r:
247 break
247 break
248
248
249 # connection is bad - possibly closed by server
249 # connection is bad - possibly closed by server
250 # discard it and ask for the next free connection
250 # discard it and ask for the next free connection
251 h.close()
251 h.close()
252 self._cm.remove(h)
252 self._cm.remove(h)
253 h = self._cm.get_ready_conn(host)
253 h = self._cm.get_ready_conn(host)
254 else:
254 else:
255 # no (working) free connections were found. Create a new one.
255 # no (working) free connections were found. Create a new one.
256 h = http_class(host)
256 h = http_class(host)
257 if DEBUG:
257 if DEBUG:
258 DEBUG.info("creating new connection to %s (%d)",
258 DEBUG.info("creating new connection to %s (%d)",
259 host, id(h))
259 host, id(h))
260 self._cm.add(host, h, 0)
260 self._cm.add(host, h, 0)
261 self._start_transaction(h, req)
261 self._start_transaction(h, req)
262 r = h.getresponse()
262 r = h.getresponse()
263 except (socket.error, httplib.HTTPException) as err:
263 except (socket.error, httplib.HTTPException) as err:
264 raise urlerr.urlerror(err)
264 raise urlerr.urlerror(err)
265
265
266 # if not a persistent connection, don't try to reuse it
266 # if not a persistent connection, don't try to reuse it
267 if r.will_close:
267 if r.will_close:
268 self._cm.remove(h)
268 self._cm.remove(h)
269
269
270 if DEBUG:
270 if DEBUG:
271 DEBUG.info("STATUS: %s, %s", r.status, r.reason)
271 DEBUG.info("STATUS: %s, %s", r.status, r.reason)
272 r._handler = self
272 r._handler = self
273 r._host = host
273 r._host = host
274 r._url = req.get_full_url()
274 r._url = req.get_full_url()
275 r._connection = h
275 r._connection = h
276 r.code = r.status
276 r.code = r.status
277 r.headers = r.msg
277 r.headers = r.msg
278 r.msg = r.reason
278 r.msg = r.reason
279
279
280 if r.status == 200 or not HANDLE_ERRORS:
280 if r.status == 200 or not HANDLE_ERRORS:
281 return r
281 return r
282 else:
282 else:
283 return self.parent.error('http', req, r,
283 return self.parent.error('http', req, r,
284 r.status, r.msg, r.headers)
284 r.status, r.msg, r.headers)
285
285
286 def _reuse_connection(self, h, req, host):
286 def _reuse_connection(self, h, req, host):
287 """start the transaction with a re-used connection
287 """start the transaction with a re-used connection
288 return a response object (r) upon success or None on failure.
288 return a response object (r) upon success or None on failure.
289 This DOES not close or remove bad connections in cases where
289 This DOES not close or remove bad connections in cases where
290 it returns. However, if an unexpected exception occurs, it
290 it returns. However, if an unexpected exception occurs, it
291 will close and remove the connection before re-raising.
291 will close and remove the connection before re-raising.
292 """
292 """
293 try:
293 try:
294 self._start_transaction(h, req)
294 self._start_transaction(h, req)
295 r = h.getresponse()
295 r = h.getresponse()
296 # note: just because we got something back doesn't mean it
296 # note: just because we got something back doesn't mean it
297 # worked. We'll check the version below, too.
297 # worked. We'll check the version below, too.
298 except (socket.error, httplib.HTTPException):
298 except (socket.error, httplib.HTTPException):
299 r = None
299 r = None
300 except: # re-raises
300 except: # re-raises
301 # adding this block just in case we've missed
301 # adding this block just in case we've missed
302 # something we will still raise the exception, but
302 # something we will still raise the exception, but
303 # lets try and close the connection and remove it
303 # lets try and close the connection and remove it
304 # first. We previously got into a nasty loop
304 # first. We previously got into a nasty loop
305 # where an exception was uncaught, and so the
305 # where an exception was uncaught, and so the
306 # connection stayed open. On the next try, the
306 # connection stayed open. On the next try, the
307 # same exception was raised, etc. The trade-off is
307 # same exception was raised, etc. The trade-off is
308 # that it's now possible this call will raise
308 # that it's now possible this call will raise
309 # a DIFFERENT exception
309 # a DIFFERENT exception
310 if DEBUG:
310 if DEBUG:
311 DEBUG.error("unexpected exception - closing "
311 DEBUG.error("unexpected exception - closing "
312 "connection to %s (%d)", host, id(h))
312 "connection to %s (%d)", host, id(h))
313 self._cm.remove(h)
313 self._cm.remove(h)
314 h.close()
314 h.close()
315 raise
315 raise
316
316
317 if r is None or r.version == 9:
317 if r is None or r.version == 9:
318 # httplib falls back to assuming HTTP 0.9 if it gets a
318 # httplib falls back to assuming HTTP 0.9 if it gets a
319 # bad header back. This is most likely to happen if
319 # bad header back. This is most likely to happen if
320 # the socket has been closed by the server since we
320 # the socket has been closed by the server since we
321 # last used the connection.
321 # last used the connection.
322 if DEBUG:
322 if DEBUG:
323 DEBUG.info("failed to re-use connection to %s (%d)",
323 DEBUG.info("failed to re-use connection to %s (%d)",
324 host, id(h))
324 host, id(h))
325 r = None
325 r = None
326 else:
326 else:
327 if DEBUG:
327 if DEBUG:
328 DEBUG.info("re-using connection to %s (%d)", host, id(h))
328 DEBUG.info("re-using connection to %s (%d)", host, id(h))
329
329
330 return r
330 return r
331
331
332 def _start_transaction(self, h, req):
332 def _start_transaction(self, h, req):
333 # What follows mostly reimplements HTTPConnection.request()
333 # What follows mostly reimplements HTTPConnection.request()
334 # except it adds self.parent.addheaders in the mix.
334 # except it adds self.parent.addheaders in the mix.
335 headers = req.headers.copy()
335 headers = req.headers.copy()
336 if sys.version_info >= (2, 4):
336 if sys.version_info >= (2, 4):
337 headers.update(req.unredirected_hdrs)
337 headers.update(req.unredirected_hdrs)
338 headers.update(self.parent.addheaders)
338 headers.update(self.parent.addheaders)
339 headers = dict((n.lower(), v) for n, v in headers.items())
339 headers = dict((n.lower(), v) for n, v in headers.items())
340 skipheaders = {}
340 skipheaders = {}
341 for n in ('host', 'accept-encoding'):
341 for n in ('host', 'accept-encoding'):
342 if n in headers:
342 if n in headers:
343 skipheaders['skip_' + n.replace('-', '_')] = 1
343 skipheaders['skip_' + n.replace('-', '_')] = 1
344 try:
344 try:
345 if req.has_data():
345 if req.has_data():
346 data = req.get_data()
346 data = req.get_data()
347 h.putrequest('POST', req.get_selector(), **skipheaders)
347 h.putrequest('POST', req.get_selector(), **skipheaders)
348 if 'content-type' not in headers:
348 if 'content-type' not in headers:
349 h.putheader('Content-type',
349 h.putheader('Content-type',
350 'application/x-www-form-urlencoded')
350 'application/x-www-form-urlencoded')
351 if 'content-length' not in headers:
351 if 'content-length' not in headers:
352 h.putheader('Content-length', '%d' % len(data))
352 h.putheader('Content-length', '%d' % len(data))
353 else:
353 else:
354 h.putrequest('GET', req.get_selector(), **skipheaders)
354 h.putrequest('GET', req.get_selector(), **skipheaders)
355 except socket.error as err:
355 except socket.error as err:
356 raise urlerr.urlerror(err)
356 raise urlerr.urlerror(err)
357 for k, v in headers.items():
357 for k, v in headers.items():
358 h.putheader(k, v)
358 h.putheader(k, v)
359 h.endheaders()
359 h.endheaders()
360 if req.has_data():
360 if req.has_data():
361 h.send(data)
361 h.send(data)
362
362
363 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
363 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
364 pass
364 pass
365
365
366 class HTTPResponse(httplib.HTTPResponse):
366 class HTTPResponse(httplib.HTTPResponse):
367 # we need to subclass HTTPResponse in order to
367 # we need to subclass HTTPResponse in order to
368 # 1) add readline() and readlines() methods
368 # 1) add readline() and readlines() methods
369 # 2) add close_connection() methods
369 # 2) add close_connection() methods
370 # 3) add info() and geturl() methods
370 # 3) add info() and geturl() methods
371
371
372 # in order to add readline(), read must be modified to deal with a
372 # in order to add readline(), read must be modified to deal with a
373 # buffer. example: readline must read a buffer and then spit back
373 # buffer. example: readline must read a buffer and then spit back
374 # one line at a time. The only real alternative is to read one
374 # one line at a time. The only real alternative is to read one
375 # BYTE at a time (ick). Once something has been read, it can't be
375 # BYTE at a time (ick). Once something has been read, it can't be
376 # put back (ok, maybe it can, but that's even uglier than this),
376 # put back (ok, maybe it can, but that's even uglier than this),
377 # so if you THEN do a normal read, you must first take stuff from
377 # so if you THEN do a normal read, you must first take stuff from
378 # the buffer.
378 # the buffer.
379
379
380 # the read method wraps the original to accommodate buffering,
380 # the read method wraps the original to accommodate buffering,
381 # although read() never adds to the buffer.
381 # although read() never adds to the buffer.
382 # Both readline and readlines have been stolen with almost no
382 # Both readline and readlines have been stolen with almost no
383 # modification from socket.py
383 # modification from socket.py
384
384
385
385
386 def __init__(self, sock, debuglevel=0, strict=0, method=None):
386 def __init__(self, sock, debuglevel=0, strict=0, method=None):
387 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
387 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
388 self.fileno = sock.fileno
388 self.fileno = sock.fileno
389 self.code = None
389 self.code = None
390 self._rbuf = ''
390 self._rbuf = ''
391 self._rbufsize = 8096
391 self._rbufsize = 8096
392 self._handler = None # inserted by the handler later
392 self._handler = None # inserted by the handler later
393 self._host = None # (same)
393 self._host = None # (same)
394 self._url = None # (same)
394 self._url = None # (same)
395 self._connection = None # (same)
395 self._connection = None # (same)
396
396
397 _raw_read = httplib.HTTPResponse.read
397 _raw_read = httplib.HTTPResponse.read
398
398
399 def close(self):
399 def close(self):
400 if self.fp:
400 if self.fp:
401 self.fp.close()
401 self.fp.close()
402 self.fp = None
402 self.fp = None
403 if self._handler:
403 if self._handler:
404 self._handler._request_closed(self, self._host,
404 self._handler._request_closed(self, self._host,
405 self._connection)
405 self._connection)
406
406
407 def close_connection(self):
407 def close_connection(self):
408 self._handler._remove_connection(self._host, self._connection, close=1)
408 self._handler._remove_connection(self._host, self._connection, close=1)
409 self.close()
409 self.close()
410
410
411 def info(self):
411 def info(self):
412 return self.headers
412 return self.headers
413
413
414 def geturl(self):
414 def geturl(self):
415 return self._url
415 return self._url
416
416
417 def read(self, amt=None):
417 def read(self, amt=None):
418 # the _rbuf test is only in this first if for speed. It's not
418 # the _rbuf test is only in this first if for speed. It's not
419 # logically necessary
419 # logically necessary
420 if self._rbuf and not amt is None:
420 if self._rbuf and not amt is None:
421 L = len(self._rbuf)
421 L = len(self._rbuf)
422 if amt > L:
422 if amt > L:
423 amt -= L
423 amt -= L
424 else:
424 else:
425 s = self._rbuf[:amt]
425 s = self._rbuf[:amt]
426 self._rbuf = self._rbuf[amt:]
426 self._rbuf = self._rbuf[amt:]
427 return s
427 return s
428
428
429 s = self._rbuf + self._raw_read(amt)
429 s = self._rbuf + self._raw_read(amt)
430 self._rbuf = ''
430 self._rbuf = ''
431 return s
431 return s
432
432
433 # stolen from Python SVN #68532 to fix issue1088
433 # stolen from Python SVN #68532 to fix issue1088
434 def _read_chunked(self, amt):
434 def _read_chunked(self, amt):
435 chunk_left = self.chunk_left
435 chunk_left = self.chunk_left
436 value = ''
436 value = ''
437
437
438 # XXX This accumulates chunks by repeated string concatenation,
438 # XXX This accumulates chunks by repeated string concatenation,
439 # which is not efficient as the number or size of chunks gets big.
439 # which is not efficient as the number or size of chunks gets big.
440 while True:
440 while True:
441 if chunk_left is None:
441 if chunk_left is None:
442 line = self.fp.readline()
442 line = self.fp.readline()
443 i = line.find(';')
443 i = line.find(';')
444 if i >= 0:
444 if i >= 0:
445 line = line[:i] # strip chunk-extensions
445 line = line[:i] # strip chunk-extensions
446 try:
446 try:
447 chunk_left = int(line, 16)
447 chunk_left = int(line, 16)
448 except ValueError:
448 except ValueError:
449 # close the connection as protocol synchronization is
449 # close the connection as protocol synchronization is
450 # probably lost
450 # probably lost
451 self.close()
451 self.close()
452 raise httplib.IncompleteRead(value)
452 raise httplib.IncompleteRead(value)
453 if chunk_left == 0:
453 if chunk_left == 0:
454 break
454 break
455 if amt is None:
455 if amt is None:
456 value += self._safe_read(chunk_left)
456 value += self._safe_read(chunk_left)
457 elif amt < chunk_left:
457 elif amt < chunk_left:
458 value += self._safe_read(amt)
458 value += self._safe_read(amt)
459 self.chunk_left = chunk_left - amt
459 self.chunk_left = chunk_left - amt
460 return value
460 return value
461 elif amt == chunk_left:
461 elif amt == chunk_left:
462 value += self._safe_read(amt)
462 value += self._safe_read(amt)
463 self._safe_read(2) # toss the CRLF at the end of the chunk
463 self._safe_read(2) # toss the CRLF at the end of the chunk
464 self.chunk_left = None
464 self.chunk_left = None
465 return value
465 return value
466 else:
466 else:
467 value += self._safe_read(chunk_left)
467 value += self._safe_read(chunk_left)
468 amt -= chunk_left
468 amt -= chunk_left
469
469
470 # we read the whole chunk, get another
470 # we read the whole chunk, get another
471 self._safe_read(2) # toss the CRLF at the end of the chunk
471 self._safe_read(2) # toss the CRLF at the end of the chunk
472 chunk_left = None
472 chunk_left = None
473
473
474 # read and discard trailer up to the CRLF terminator
474 # read and discard trailer up to the CRLF terminator
475 ### note: we shouldn't have any trailers!
475 ### note: we shouldn't have any trailers!
476 while True:
476 while True:
477 line = self.fp.readline()
477 line = self.fp.readline()
478 if not line:
478 if not line:
479 # a vanishingly small number of sites EOF without
479 # a vanishingly small number of sites EOF without
480 # sending the trailer
480 # sending the trailer
481 break
481 break
482 if line == '\r\n':
482 if line == '\r\n':
483 break
483 break
484
484
485 # we read everything; close the "file"
485 # we read everything; close the "file"
486 self.close()
486 self.close()
487
487
488 return value
488 return value
489
489
def readline(self, limit=-1):
    """Return one line (including the trailing newline, if any) from
    the buffered response, reading more data from the underlying
    stream as needed.

    A non-negative ``limit`` caps the number of bytes returned even if
    no newline has been seen yet.
    """
    pos = self._rbuf.find('\n')
    # Keep pulling chunks until a newline appears or the limit is
    # already satisfied by what we have buffered.
    while pos < 0 and not (0 < limit <= len(self._rbuf)):
        chunk = self._raw_read(self._rbufsize)
        if not chunk:
            break  # EOF before any newline showed up
        pos = chunk.find('\n')
        if pos >= 0:
            pos += len(self._rbuf)
        self._rbuf += chunk
    if pos < 0:
        # no newline found: hand back everything buffered
        pos = len(self._rbuf)
    else:
        # include the newline itself in the returned slice
        pos += 1
    if 0 <= limit < len(self._rbuf):
        pos = limit
    line = self._rbuf[:pos]
    self._rbuf = self._rbuf[pos:]
    return line
508
508
def readlines(self, sizehint=0):
    """Return a list of lines read via self.readline().

    Reading stops at EOF, or once the cumulative size of the lines
    read reaches ``sizehint`` (when sizehint is non-zero); the line
    that crosses the hint is still included.
    """
    total = 0
    # renamed from 'list', which shadowed the builtin of the same name
    lines = []
    while True:
        line = self.readline()
        if not line:
            break
        lines.append(line)
        total += len(line)
        if sizehint and total >= sizehint:
            break
    return lines
521
521
def safesend(self, str):
    """Send `str' to the server.

    Shamelessly ripped off from httplib to patch a bad behavior.
    """
    # _broken_pipe_resp is an attribute we set in this function
    # if the socket is closed while we're sending data but
    # the server sent us a response before hanging up.
    # In that case, we want to pretend to send the rest of the
    # outgoing data, and then let the user use getresponse()
    # (which we wrap) to get this last response before
    # opening a new socket.
    if getattr(self, '_broken_pipe_resp', None) is not None:
        return

    if self.sock is None:
        if not self.auto_open:
            raise httplib.NotConnected
        self.connect()

    # send the data to the server. if we get a broken pipe, then close
    # the socket. we want to reconnect when somebody tries to send again.
    #
    # NOTE: we DO propagate the error, though, because we cannot simply
    # ignore the error... the caller will know if they can retry.
    if self.debuglevel > 0:
        print("send:", repr(str))
    try:
        blocksize = 8192
        read = getattr(str, 'read', None)
        if read is None:
            # plain string/bytes payload
            self.sock.sendall(str)
        else:
            # file-like payload: stream it out in fixed-size pieces
            if self.debuglevel > 0:
                print("sending a read()able")
            data = read(blocksize)
            while data:
                self.sock.sendall(data)
                data = read(blocksize)
    except socket.error as v:
        reraise = True
        if v[0] == errno.EPIPE:  # Broken pipe
            if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                # grab the early response before dropping the socket
                self._broken_pipe_resp = None
                self._broken_pipe_resp = self.getresponse()
                reraise = False
            self.close()
        if reraise:
            raise
572
572
def wrapgetresponse(cls):
    """Wraps getresponse in cls with a broken-pipe sane version.
    """
    def safegetresponse(self):
        # safesend() may have stashed the server's early reply in
        # _broken_pipe_resp after a broken pipe (the socket is already
        # closed in that case); hand that back instead of reading from
        # the dead connection.  Otherwise use the normal response path.
        stashed = getattr(self, '_broken_pipe_resp', None)
        if stashed is None:
            return cls.getresponse(self)
        return stashed
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
587
587
class HTTPConnection(httplib.HTTPConnection):
    """httplib.HTTPConnection with keepalive-aware response handling
    and broken-pipe-tolerant send/getresponse."""
    # use the modified response class
    response_class = HTTPResponse
    send = safesend
    getresponse = wrapgetresponse(httplib.HTTPConnection)
593
593
594
594
595 #########################################################################
595 #########################################################################
596 ##### TEST FUNCTIONS
596 ##### TEST FUNCTIONS
597 #########################################################################
597 #########################################################################
598
598
def error_handler(url):
    """Fetch *url* with HANDLE_ERRORS both off and on, printing the
    resulting status/reason and the handler's open connections."""
    global HANDLE_ERRORS
    saved = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    urlreq.installopener(urlreq.buildopener(keepalive_handler))
    pos = {0: 'off', 1: 'on'}
    for mode in (0, 1):
        print(" fancy error handling %s (HANDLE_ERRORS = %i)"
              % (pos[mode], mode))
        HANDLE_ERRORS = mode
        try:
            fo = urlreq.urlopen(url)
            fo.read()
            fo.close()
            try:
                status, reason = fo.status, fo.reason
            except AttributeError:
                status, reason = None, None
        except IOError as e:
            print(" EXCEPTION: %s" % e)
            raise
        else:
            print(" status = %s, reason = %s" % (status, reason))
    HANDLE_ERRORS = saved
    hosts = keepalive_handler.open_connections()
    print("open connections:", hosts)
    keepalive_handler.close_all()
626
626
def continuity(url):
    """Fetch *url* three ways (plain urllib, keepalive read, keepalive
    readline) and print an md5 digest of each body so the results can
    be compared for corruption."""
    fmtstr = '%25s: %s'

    def _digest(opened):
        # read the whole body, close the response, return its md5 hex
        body = opened.read()
        opened.close()
        return hashlib.md5(body).hexdigest()

    # first fetch the file with the normal http handler
    urlreq.installopener(urlreq.buildopener())
    print(fmtstr % ('normal urllib', _digest(urlreq.urlopen(url))))

    # now install the keepalive handler and try again
    urlreq.installopener(urlreq.buildopener(HTTPHandler()))
    print(fmtstr % ('keepalive read', _digest(urlreq.urlopen(url))))

    # finally pull the body line by line through the keepalive handler
    fo = urlreq.urlopen(url)
    body = ''
    while True:
        line = fo.readline()
        if not line:
            break
        body += line
    fo.close()
    print(fmtstr % ('keepalive readline', hashlib.md5(body).hexdigest()))
660
660
def comp(N, url):
    """Time N fetches of *url* through the stock urllib handlers and
    again through the keepalive handler, printing the speedup."""
    print(' making %i connections to:\n %s' % (N, url))

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    urlreq.installopener(urlreq.buildopener())
    t1 = fetch(N, url)
    print(' TIME: %.3f s' % t1)

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    urlreq.installopener(urlreq.buildopener(HTTPHandler()))
    t2 = fetch(N, url)
    print(' TIME: %.3f s' % t2)
    print(' improvement factor: %.2f' % (t1 / t2))
678
678
def fetch(N, url, delay=0):
    """Fetch *url* N times, sleeping *delay* seconds between requests
    when given, and return the total elapsed time in seconds.

    Prints a warning for any response whose body length differs from
    the first one (a sign of a corrupted keepalive stream).
    """
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urlreq.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    # enumerate replaces the hand-maintained 'j' counter, and '!='
    # replaces the awkward 'not i == lens[0]' comparison
    for j, length in enumerate(lens[1:], 1):
        if length != lens[0]:
            print("WARNING: inconsistent length on read %i: %i" % (j, length))

    return diff
699
699
def test_timeout(url):
    """Check that a connection dropped by the server during an idle
    period is transparently re-opened: fetch, wait 20s, fetch again,
    and compare the two bodies."""
    global DEBUG
    dbbackup = DEBUG

    class FakeLogger(object):
        # route keepalive's debug logging straight to stdout
        def debug(self, msg, *args):
            print(msg % args)
        info = warning = error = debug

    DEBUG = FakeLogger()
    print(" fetching the file to establish a connection")
    fo = urlreq.urlopen(url)
    data1 = fo.read()
    fo.close()

    print(" waiting %i seconds for the server to close the connection" % 20)
    for remaining in range(20, 0, -1):
        sys.stdout.write('\r %2i' % remaining)
        sys.stdout.flush()
        time.sleep(1)
    sys.stderr.write('\r')

    print(" fetching the file a second time")
    fo = urlreq.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print(' data are identical')
    else:
        print(' ERROR: DATA DIFFER')

    DEBUG = dbbackup
733
733
734
734
def test(url, N=10):
    """Run the full manual test suite against *url*: error handling,
    continuity, speed comparison and dropped-connection recovery."""
    print("checking error handler (do this on a non-200)")
    try:
        error_handler(url)
    except IOError:
        print("exiting - exception will prevent further tests")
        sys.exit()
    # remaining checks, each preceded by a blank line and a banner
    for banner, action in (
        ("performing continuity test (making sure stuff isn't corrupted)",
         lambda: continuity(url)),
        ("performing speed comparison",
         lambda: comp(N, url)),
        ("performing dropped-connection check",
         lambda: test_timeout(url)),
    ):
        print('')
        print(banner)
        action()
750
750
if __name__ == '__main__':
    import time
    # usage: script <integer> <url>
    try:
        count = int(sys.argv[1])
        target = sys.argv[2]
    except (IndexError, ValueError):
        print("%s <integer> <url>" % sys.argv[0])
    else:
        test(target, count)
@@ -1,152 +1,159
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 try:
13 try:
14 import cPickle as pickle
14 import cPickle as pickle
15 pickle.dumps
15 pickle.dumps
16 except ImportError:
16 except ImportError:
17 import pickle
17 import pickle
18 pickle.dumps # silence pyflakes
18 pickle.dumps # silence pyflakes
19
19
20 try:
20 try:
21 import httplib
22 httplib.HTTPException
23 except ImportError:
24 import http.client as httplib
25 httplib.HTTPException
26
27 try:
21 import SocketServer as socketserver
28 import SocketServer as socketserver
22 socketserver.ThreadingMixIn
29 socketserver.ThreadingMixIn
23 except ImportError:
30 except ImportError:
24 import socketserver
31 import socketserver
25 socketserver.ThreadingMixIn
32 socketserver.ThreadingMixIn
26
33
27 try:
34 try:
28 import xmlrpclib
35 import xmlrpclib
29 xmlrpclib.Transport
36 xmlrpclib.Transport
30 except ImportError:
37 except ImportError:
31 import xmlrpc.client as xmlrpclib
38 import xmlrpc.client as xmlrpclib
32 xmlrpclib.Transport
39 xmlrpclib.Transport
33
40
34 try:
41 try:
35 import urlparse
42 import urlparse
36 urlparse.urlparse
43 urlparse.urlparse
37 except ImportError:
44 except ImportError:
38 import urllib.parse as urlparse
45 import urllib.parse as urlparse
39 urlparse.urlparse
46 urlparse.urlparse
40
47
41 try:
48 try:
42 import cStringIO as io
49 import cStringIO as io
43 stringio = io.StringIO
50 stringio = io.StringIO
44 except ImportError:
51 except ImportError:
45 import io
52 import io
46 stringio = io.StringIO
53 stringio = io.StringIO
47
54
48 try:
55 try:
49 import Queue as _queue
56 import Queue as _queue
50 _queue.Queue
57 _queue.Queue
51 except ImportError:
58 except ImportError:
52 import queue as _queue
59 import queue as _queue
53 empty = _queue.Empty
60 empty = _queue.Empty
54 queue = _queue.Queue
61 queue = _queue.Queue
55
62
56 class _pycompatstub(object):
63 class _pycompatstub(object):
57 pass
64 pass
58
65
59 def _alias(alias, origin, items):
66 def _alias(alias, origin, items):
60 """ populate a _pycompatstub
67 """ populate a _pycompatstub
61
68
62 copies items from origin to alias
69 copies items from origin to alias
63 """
70 """
64 def hgcase(item):
71 def hgcase(item):
65 return item.replace('_', '').lower()
72 return item.replace('_', '').lower()
66 for item in items:
73 for item in items:
67 try:
74 try:
68 setattr(alias, hgcase(item), getattr(origin, item))
75 setattr(alias, hgcase(item), getattr(origin, item))
69 except AttributeError:
76 except AttributeError:
70 pass
77 pass
71
78
72 urlreq = _pycompatstub()
79 urlreq = _pycompatstub()
73 urlerr = _pycompatstub()
80 urlerr = _pycompatstub()
74 try:
81 try:
75 import urllib2
82 import urllib2
76 import urllib
83 import urllib
77 _alias(urlreq, urllib, (
84 _alias(urlreq, urllib, (
78 "addclosehook",
85 "addclosehook",
79 "addinfourl",
86 "addinfourl",
80 "ftpwrapper",
87 "ftpwrapper",
81 "pathname2url",
88 "pathname2url",
82 "quote",
89 "quote",
83 "splitattr",
90 "splitattr",
84 "splitpasswd",
91 "splitpasswd",
85 "splitport",
92 "splitport",
86 "splituser",
93 "splituser",
87 "unquote",
94 "unquote",
88 "url2pathname",
95 "url2pathname",
89 "urlencode",
96 "urlencode",
90 "urlencode",
97 "urlencode",
91 ))
98 ))
92 _alias(urlreq, urllib2, (
99 _alias(urlreq, urllib2, (
93 "AbstractHTTPHandler",
100 "AbstractHTTPHandler",
94 "BaseHandler",
101 "BaseHandler",
95 "build_opener",
102 "build_opener",
96 "FileHandler",
103 "FileHandler",
97 "FTPHandler",
104 "FTPHandler",
98 "HTTPBasicAuthHandler",
105 "HTTPBasicAuthHandler",
99 "HTTPDigestAuthHandler",
106 "HTTPDigestAuthHandler",
100 "HTTPHandler",
107 "HTTPHandler",
101 "HTTPPasswordMgrWithDefaultRealm",
108 "HTTPPasswordMgrWithDefaultRealm",
102 "HTTPSHandler",
109 "HTTPSHandler",
103 "install_opener",
110 "install_opener",
104 "ProxyHandler",
111 "ProxyHandler",
105 "Request",
112 "Request",
106 "urlopen",
113 "urlopen",
107 ))
114 ))
108 _alias(urlerr, urllib2, (
115 _alias(urlerr, urllib2, (
109 "HTTPError",
116 "HTTPError",
110 "URLError",
117 "URLError",
111 ))
118 ))
112
119
113 except ImportError:
120 except ImportError:
114 import urllib.request
121 import urllib.request
115 _alias(urlreq, urllib.request, (
122 _alias(urlreq, urllib.request, (
116 "AbstractHTTPHandler",
123 "AbstractHTTPHandler",
117 "addclosehook",
124 "addclosehook",
118 "addinfourl",
125 "addinfourl",
119 "BaseHandler",
126 "BaseHandler",
120 "build_opener",
127 "build_opener",
121 "FileHandler",
128 "FileHandler",
122 "FTPHandler",
129 "FTPHandler",
123 "ftpwrapper",
130 "ftpwrapper",
124 "HTTPHandler",
131 "HTTPHandler",
125 "HTTPSHandler",
132 "HTTPSHandler",
126 "install_opener",
133 "install_opener",
127 "pathname2url",
134 "pathname2url",
128 "HTTPBasicAuthHandler",
135 "HTTPBasicAuthHandler",
129 "HTTPDigestAuthHandler",
136 "HTTPDigestAuthHandler",
130 "HTTPPasswordMgrWithDefaultRealm",
137 "HTTPPasswordMgrWithDefaultRealm",
131 "ProxyHandler",
138 "ProxyHandler",
132 "quote",
139 "quote",
133 "Request",
140 "Request",
134 "splitattr",
141 "splitattr",
135 "splitpasswd",
142 "splitpasswd",
136 "splitport",
143 "splitport",
137 "splituser",
144 "splituser",
138 "unquote",
145 "unquote",
139 "url2pathname",
146 "url2pathname",
140 "urlopen",
147 "urlopen",
141 ))
148 ))
142 import urllib.error
149 import urllib.error
143 _alias(urlerr, urllib.error, (
150 _alias(urlerr, urllib.error, (
144 "HTTPError",
151 "HTTPError",
145 "URLError",
152 "URLError",
146 ))
153 ))
147
154
148 try:
155 try:
149 xrange
156 xrange
150 except NameError:
157 except NameError:
151 import builtins
158 import builtins
152 builtins.xrange = range
159 builtins.xrange = range
@@ -1,522 +1,522
1 # url.py - HTTP handling for mercurial
1 # url.py - HTTP handling for mercurial
2 #
2 #
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import base64
12 import base64
13 import httplib
14 import os
13 import os
15 import socket
14 import socket
16
15
17 from .i18n import _
16 from .i18n import _
18 from . import (
17 from . import (
19 error,
18 error,
20 httpconnection as httpconnectionmod,
19 httpconnection as httpconnectionmod,
21 keepalive,
20 keepalive,
22 sslutil,
21 sslutil,
23 util,
22 util,
24 )
23 )
24
25 httplib = util.httplib
25 stringio = util.stringio
26 stringio = util.stringio
26
27 urlerr = util.urlerr
27 urlerr = util.urlerr
28 urlreq = util.urlreq
28 urlreq = util.urlreq
29
29
class passwordmgr(object):
    """Front-end over a password database that falls back to the
    [auth] configuration and, finally, interactive prompting."""

    def __init__(self, ui, passwddb):
        self.ui = ui
        self.passwddb = passwddb

    def add_password(self, realm, uri, user, passwd):
        """Record credentials in the underlying password database."""
        return self.passwddb.add_password(realm, uri, user, passwd)

    def find_user_password(self, realm, authuri):
        """Return (user, passwd) for *authuri*, consulting the stored
        database, then auth.* config, then prompting the user."""
        authinfo = self.passwddb.find_user_password(realm, authuri)
        user, passwd = authinfo
        if user and passwd:
            # both halves already known: nothing more to ask
            self._writedebug(user, passwd)
            return (user, passwd)

        if not user or not passwd:
            # try the [auth] section of the configuration
            res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
            if res:
                group, auth = res
                user, passwd = auth.get('username'), auth.get('password')
                self.ui.debug("using auth.%s.* for authentication\n" % group)
        if not user or not passwd:
            # still incomplete: prompt interactively (or abort)
            u = util.url(authuri)
            u.query = None
            if not self.ui.interactive():
                raise error.Abort(_('http authorization required for %s') %
                                  util.hidepassword(str(u)))

            self.ui.write(_("http authorization required for %s\n") %
                          util.hidepassword(str(u)))
            self.ui.write(_("realm: %s\n") % realm)
            if user:
                self.ui.write(_("user: %s\n") % user)
            else:
                user = self.ui.prompt(_("user:"), default=None)

            if not passwd:
                passwd = self.ui.getpass()

        self.passwddb.add_password(realm, authuri, user, passwd)
        self._writedebug(user, passwd)
        return (user, passwd)

    def _writedebug(self, user, passwd):
        # log the user and a same-length mask for the password
        msg = _('http auth: user %s, password %s\n')
        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))

    def find_stored_password(self, authuri):
        """Look up *authuri* in the database without realm matching."""
        return self.passwddb.find_user_password(None, authuri)
79
79
class proxyhandler(urlreq.proxyhandler):
    """Proxy handler configured from Mercurial's [http_proxy] section
    and the http_proxy/no_proxy environment variables."""

    def __init__(self, ui):
        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None

        if proxyurl:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            proxy = util.url(proxyurl)
            if not proxy.user:
                proxy.user = ui.config("http_proxy", "user")
                proxy.passwd = ui.config("http_proxy", "passwd")

            # see if we should use a proxy for this url
            no_list = ["localhost", "127.0.0.1"]
            no_list.extend(p.lower()
                           for p in ui.configlist("http_proxy", "no"))
            no_list.extend(p.strip().lower()
                           for p in os.getenv("no_proxy", '').split(',')
                           if p.strip())
            # "http_proxy.always" config is for running tests on localhost
            if ui.configbool("http_proxy", "always"):
                self.no_list = []
            else:
                self.no_list = no_list

            proxyurl = str(proxy)
            proxies = {'http': proxyurl, 'https': proxyurl}
            ui.debug('proxying through http://%s:%s\n' %
                     (proxy.host, proxy.port))
        else:
            proxies = {}

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found. So, if there's a config entry
        # defining a proxy, drop the environment ones
        if ui.config("http_proxy", "host"):
            for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
                try:
                    if env in os.environ:
                        del os.environ[env]
                except OSError:
                    pass

        urlreq.proxyhandler.__init__(self, proxies)
        self.ui = ui

    def proxy_open(self, req, proxy, type_):
        host = req.get_host().split(':')[0]
        for pattern in self.no_list:
            # exact host, '*.suffix' and '.suffix' entries all bypass
            # the proxy
            if (host == pattern
                or (pattern.startswith('*.') and host.endswith(pattern[2:]))
                or (pattern.startswith('.') and host.endswith(pattern[1:]))):
                return None

        return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
140
140
def _gen_sendfile(orgsend):
    """Wrap *orgsend* so httpsendfile payloads are streamed in chunks
    instead of being passed through whole."""
    def _sendfile(self, data):
        if not isinstance(data, httpconnectionmod.httpsendfile):
            orgsend(self, data)
            return
        # if auth required, some data sent twice, so rewind here
        data.seek(0)
        for chunk in util.filechunkiter(data):
            orgsend(self, chunk)
    return _sendfile
152
152
has_https = util.safehasattr(urlreq, 'httpshandler')
if has_https:
    try:
        # modern Pythons provide this directly
        _create_connection = socket.create_connection
    except AttributeError:
        _GLOBAL_DEFAULT_TIMEOUT = object()

        def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                               source_address=None):
            # lifted from Python 2.6
            msg = "getaddrinfo returns an empty list"
            host, port = address
            for addrinfo in socket.getaddrinfo(host, port, 0,
                                               socket.SOCK_STREAM):
                family, socktype, proto, canonname, sockaddr = addrinfo
                sock = None
                try:
                    sock = socket.socket(family, socktype, proto)
                    if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    if source_address:
                        sock.bind(source_address)
                    sock.connect(sockaddr)
                    return sock

                # NOTE: deliberately rebinds msg so the most recent
                # failure is what gets re-raised below
                except socket.error as msg:
                    if sock is not None:
                        sock.close()

            raise socket.error(msg)
183
183
class httpconnection(keepalive.HTTPConnection):
    """Keepalive HTTP connection with CONNECT-proxy support."""
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection.send)

    def connect(self):
        """Open the socket, tunnelling through a CONNECT proxy when
        one is configured (self.realhostport set)."""
        if not (has_https and self.realhostport):
            keepalive.HTTPConnection.connect(self)
            return
        # use CONNECT proxy
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        if _generic_proxytunnel(self):
            # we do not support client X.509 certificates
            self.sock = sslutil.wrapsocket(self.sock, None, None, None,
                                           serverhostname=self.host)

    def getresponse(self):
        """Return a stashed proxy CONNECT response if one is pending,
        otherwise defer to the keepalive implementation."""
        proxyres = getattr(self, 'proxyres', None)
        if proxyres:
            if proxyres.will_close:
                self.close()
            self.proxyres = None
            return proxyres
        return keepalive.HTTPConnection.getresponse(self)
207
207
208 # general transaction handler to support different ways to handle
208 # general transaction handler to support different ways to handle
209 # HTTPS proxying before and after Python 2.6.3.
209 # HTTPS proxying before and after Python 2.6.3.
210 def _generic_start_transaction(handler, h, req):
210 def _generic_start_transaction(handler, h, req):
211 tunnel_host = getattr(req, '_tunnel_host', None)
211 tunnel_host = getattr(req, '_tunnel_host', None)
212 if tunnel_host:
212 if tunnel_host:
213 if tunnel_host[:7] not in ['http://', 'https:/']:
213 if tunnel_host[:7] not in ['http://', 'https:/']:
214 tunnel_host = 'https://' + tunnel_host
214 tunnel_host = 'https://' + tunnel_host
215 new_tunnel = True
215 new_tunnel = True
216 else:
216 else:
217 tunnel_host = req.get_selector()
217 tunnel_host = req.get_selector()
218 new_tunnel = False
218 new_tunnel = False
219
219
220 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
220 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
221 u = util.url(tunnel_host)
221 u = util.url(tunnel_host)
222 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
222 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
223 h.realhostport = ':'.join([u.host, (u.port or '443')])
223 h.realhostport = ':'.join([u.host, (u.port or '443')])
224 h.headers = req.headers.copy()
224 h.headers = req.headers.copy()
225 h.headers.update(handler.parent.addheaders)
225 h.headers.update(handler.parent.addheaders)
226 return
226 return
227
227
228 h.realhostport = None
228 h.realhostport = None
229 h.headers = None
229 h.headers = None
230
230
231 def _generic_proxytunnel(self):
231 def _generic_proxytunnel(self):
232 proxyheaders = dict(
232 proxyheaders = dict(
233 [(x, self.headers[x]) for x in self.headers
233 [(x, self.headers[x]) for x in self.headers
234 if x.lower().startswith('proxy-')])
234 if x.lower().startswith('proxy-')])
235 self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
235 self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
236 for header in proxyheaders.iteritems():
236 for header in proxyheaders.iteritems():
237 self.send('%s: %s\r\n' % header)
237 self.send('%s: %s\r\n' % header)
238 self.send('\r\n')
238 self.send('\r\n')
239
239
240 # majority of the following code is duplicated from
240 # majority of the following code is duplicated from
241 # httplib.HTTPConnection as there are no adequate places to
241 # httplib.HTTPConnection as there are no adequate places to
242 # override functions to provide the needed functionality
242 # override functions to provide the needed functionality
243 res = self.response_class(self.sock,
243 res = self.response_class(self.sock,
244 strict=self.strict,
244 strict=self.strict,
245 method=self._method)
245 method=self._method)
246
246
247 while True:
247 while True:
248 version, status, reason = res._read_status()
248 version, status, reason = res._read_status()
249 if status != httplib.CONTINUE:
249 if status != httplib.CONTINUE:
250 break
250 break
251 while True:
251 while True:
252 skip = res.fp.readline().strip()
252 skip = res.fp.readline().strip()
253 if not skip:
253 if not skip:
254 break
254 break
255 res.status = status
255 res.status = status
256 res.reason = reason.strip()
256 res.reason = reason.strip()
257
257
258 if res.status == 200:
258 if res.status == 200:
259 while True:
259 while True:
260 line = res.fp.readline()
260 line = res.fp.readline()
261 if line == '\r\n':
261 if line == '\r\n':
262 break
262 break
263 return True
263 return True
264
264
265 if version == 'HTTP/1.0':
265 if version == 'HTTP/1.0':
266 res.version = 10
266 res.version = 10
267 elif version.startswith('HTTP/1.'):
267 elif version.startswith('HTTP/1.'):
268 res.version = 11
268 res.version = 11
269 elif version == 'HTTP/0.9':
269 elif version == 'HTTP/0.9':
270 res.version = 9
270 res.version = 9
271 else:
271 else:
272 raise httplib.UnknownProtocol(version)
272 raise httplib.UnknownProtocol(version)
273
273
274 if res.version == 9:
274 if res.version == 9:
275 res.length = None
275 res.length = None
276 res.chunked = 0
276 res.chunked = 0
277 res.will_close = 1
277 res.will_close = 1
278 res.msg = httplib.HTTPMessage(stringio())
278 res.msg = httplib.HTTPMessage(stringio())
279 return False
279 return False
280
280
281 res.msg = httplib.HTTPMessage(res.fp)
281 res.msg = httplib.HTTPMessage(res.fp)
282 res.msg.fp = None
282 res.msg.fp = None
283
283
284 # are we using the chunked-style of transfer encoding?
284 # are we using the chunked-style of transfer encoding?
285 trenc = res.msg.getheader('transfer-encoding')
285 trenc = res.msg.getheader('transfer-encoding')
286 if trenc and trenc.lower() == "chunked":
286 if trenc and trenc.lower() == "chunked":
287 res.chunked = 1
287 res.chunked = 1
288 res.chunk_left = None
288 res.chunk_left = None
289 else:
289 else:
290 res.chunked = 0
290 res.chunked = 0
291
291
292 # will the connection close at the end of the response?
292 # will the connection close at the end of the response?
293 res.will_close = res._check_close()
293 res.will_close = res._check_close()
294
294
295 # do we have a Content-Length?
295 # do we have a Content-Length?
296 # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
296 # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
297 # transfer-encoding is "chunked"
297 # transfer-encoding is "chunked"
298 length = res.msg.getheader('content-length')
298 length = res.msg.getheader('content-length')
299 if length and not res.chunked:
299 if length and not res.chunked:
300 try:
300 try:
301 res.length = int(length)
301 res.length = int(length)
302 except ValueError:
302 except ValueError:
303 res.length = None
303 res.length = None
304 else:
304 else:
305 if res.length < 0: # ignore nonsensical negative lengths
305 if res.length < 0: # ignore nonsensical negative lengths
306 res.length = None
306 res.length = None
307 else:
307 else:
308 res.length = None
308 res.length = None
309
309
310 # does the body have a fixed length? (of zero)
310 # does the body have a fixed length? (of zero)
311 if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
311 if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
312 100 <= status < 200 or # 1xx codes
312 100 <= status < 200 or # 1xx codes
313 res._method == 'HEAD'):
313 res._method == 'HEAD'):
314 res.length = 0
314 res.length = 0
315
315
316 # if the connection remains open, and we aren't using chunked, and
316 # if the connection remains open, and we aren't using chunked, and
317 # a content-length was not provided, then assume that the connection
317 # a content-length was not provided, then assume that the connection
318 # WILL close.
318 # WILL close.
319 if (not res.will_close and
319 if (not res.will_close and
320 not res.chunked and
320 not res.chunked and
321 res.length is None):
321 res.length is None):
322 res.will_close = 1
322 res.will_close = 1
323
323
324 self.proxyres = res
324 self.proxyres = res
325
325
326 return False
326 return False
327
327
328 class httphandler(keepalive.HTTPHandler):
328 class httphandler(keepalive.HTTPHandler):
329 def http_open(self, req):
329 def http_open(self, req):
330 return self.do_open(httpconnection, req)
330 return self.do_open(httpconnection, req)
331
331
332 def _start_transaction(self, h, req):
332 def _start_transaction(self, h, req):
333 _generic_start_transaction(self, h, req)
333 _generic_start_transaction(self, h, req)
334 return keepalive.HTTPHandler._start_transaction(self, h, req)
334 return keepalive.HTTPHandler._start_transaction(self, h, req)
335
335
336 if has_https:
336 if has_https:
337 class httpsconnection(httplib.HTTPConnection):
337 class httpsconnection(httplib.HTTPConnection):
338 response_class = keepalive.HTTPResponse
338 response_class = keepalive.HTTPResponse
339 default_port = httplib.HTTPS_PORT
339 default_port = httplib.HTTPS_PORT
340 # must be able to send big bundle as stream.
340 # must be able to send big bundle as stream.
341 send = _gen_sendfile(keepalive.safesend)
341 send = _gen_sendfile(keepalive.safesend)
342 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
342 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
343
343
344 def __init__(self, host, port=None, key_file=None, cert_file=None,
344 def __init__(self, host, port=None, key_file=None, cert_file=None,
345 *args, **kwargs):
345 *args, **kwargs):
346 httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
346 httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
347 self.key_file = key_file
347 self.key_file = key_file
348 self.cert_file = cert_file
348 self.cert_file = cert_file
349
349
350 def connect(self):
350 def connect(self):
351 self.sock = _create_connection((self.host, self.port))
351 self.sock = _create_connection((self.host, self.port))
352
352
353 host = self.host
353 host = self.host
354 if self.realhostport: # use CONNECT proxy
354 if self.realhostport: # use CONNECT proxy
355 _generic_proxytunnel(self)
355 _generic_proxytunnel(self)
356 host = self.realhostport.rsplit(':', 1)[0]
356 host = self.realhostport.rsplit(':', 1)[0]
357 self.sock = sslutil.wrapsocket(
357 self.sock = sslutil.wrapsocket(
358 self.sock, self.key_file, self.cert_file, ui=self.ui,
358 self.sock, self.key_file, self.cert_file, ui=self.ui,
359 serverhostname=host)
359 serverhostname=host)
360 sslutil.validatesocket(self.sock)
360 sslutil.validatesocket(self.sock)
361
361
362 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
362 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
363 def __init__(self, ui):
363 def __init__(self, ui):
364 keepalive.KeepAliveHandler.__init__(self)
364 keepalive.KeepAliveHandler.__init__(self)
365 urlreq.httpshandler.__init__(self)
365 urlreq.httpshandler.__init__(self)
366 self.ui = ui
366 self.ui = ui
367 self.pwmgr = passwordmgr(self.ui,
367 self.pwmgr = passwordmgr(self.ui,
368 self.ui.httppasswordmgrdb)
368 self.ui.httppasswordmgrdb)
369
369
370 def _start_transaction(self, h, req):
370 def _start_transaction(self, h, req):
371 _generic_start_transaction(self, h, req)
371 _generic_start_transaction(self, h, req)
372 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
372 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
373
373
374 def https_open(self, req):
374 def https_open(self, req):
375 # req.get_full_url() does not contain credentials and we may
375 # req.get_full_url() does not contain credentials and we may
376 # need them to match the certificates.
376 # need them to match the certificates.
377 url = req.get_full_url()
377 url = req.get_full_url()
378 user, password = self.pwmgr.find_stored_password(url)
378 user, password = self.pwmgr.find_stored_password(url)
379 res = httpconnectionmod.readauthforuri(self.ui, url, user)
379 res = httpconnectionmod.readauthforuri(self.ui, url, user)
380 if res:
380 if res:
381 group, auth = res
381 group, auth = res
382 self.auth = auth
382 self.auth = auth
383 self.ui.debug("using auth.%s.* for authentication\n" % group)
383 self.ui.debug("using auth.%s.* for authentication\n" % group)
384 else:
384 else:
385 self.auth = None
385 self.auth = None
386 return self.do_open(self._makeconnection, req)
386 return self.do_open(self._makeconnection, req)
387
387
388 def _makeconnection(self, host, port=None, *args, **kwargs):
388 def _makeconnection(self, host, port=None, *args, **kwargs):
389 keyfile = None
389 keyfile = None
390 certfile = None
390 certfile = None
391
391
392 if len(args) >= 1: # key_file
392 if len(args) >= 1: # key_file
393 keyfile = args[0]
393 keyfile = args[0]
394 if len(args) >= 2: # cert_file
394 if len(args) >= 2: # cert_file
395 certfile = args[1]
395 certfile = args[1]
396 args = args[2:]
396 args = args[2:]
397
397
398 # if the user has specified different key/cert files in
398 # if the user has specified different key/cert files in
399 # hgrc, we prefer these
399 # hgrc, we prefer these
400 if self.auth and 'key' in self.auth and 'cert' in self.auth:
400 if self.auth and 'key' in self.auth and 'cert' in self.auth:
401 keyfile = self.auth['key']
401 keyfile = self.auth['key']
402 certfile = self.auth['cert']
402 certfile = self.auth['cert']
403
403
404 conn = httpsconnection(host, port, keyfile, certfile, *args,
404 conn = httpsconnection(host, port, keyfile, certfile, *args,
405 **kwargs)
405 **kwargs)
406 conn.ui = self.ui
406 conn.ui = self.ui
407 return conn
407 return conn
408
408
409 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
409 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
410 def __init__(self, *args, **kwargs):
410 def __init__(self, *args, **kwargs):
411 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
411 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
412 self.retried_req = None
412 self.retried_req = None
413
413
414 def reset_retry_count(self):
414 def reset_retry_count(self):
415 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
415 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
416 # forever. We disable reset_retry_count completely and reset in
416 # forever. We disable reset_retry_count completely and reset in
417 # http_error_auth_reqed instead.
417 # http_error_auth_reqed instead.
418 pass
418 pass
419
419
420 def http_error_auth_reqed(self, auth_header, host, req, headers):
420 def http_error_auth_reqed(self, auth_header, host, req, headers):
421 # Reset the retry counter once for each request.
421 # Reset the retry counter once for each request.
422 if req is not self.retried_req:
422 if req is not self.retried_req:
423 self.retried_req = req
423 self.retried_req = req
424 self.retried = 0
424 self.retried = 0
425 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
425 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
426 self, auth_header, host, req, headers)
426 self, auth_header, host, req, headers)
427
427
428 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
428 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
429 def __init__(self, *args, **kwargs):
429 def __init__(self, *args, **kwargs):
430 self.auth = None
430 self.auth = None
431 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
431 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
432 self.retried_req = None
432 self.retried_req = None
433
433
434 def http_request(self, request):
434 def http_request(self, request):
435 if self.auth:
435 if self.auth:
436 request.add_unredirected_header(self.auth_header, self.auth)
436 request.add_unredirected_header(self.auth_header, self.auth)
437
437
438 return request
438 return request
439
439
440 def https_request(self, request):
440 def https_request(self, request):
441 if self.auth:
441 if self.auth:
442 request.add_unredirected_header(self.auth_header, self.auth)
442 request.add_unredirected_header(self.auth_header, self.auth)
443
443
444 return request
444 return request
445
445
446 def reset_retry_count(self):
446 def reset_retry_count(self):
447 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
447 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
448 # forever. We disable reset_retry_count completely and reset in
448 # forever. We disable reset_retry_count completely and reset in
449 # http_error_auth_reqed instead.
449 # http_error_auth_reqed instead.
450 pass
450 pass
451
451
452 def http_error_auth_reqed(self, auth_header, host, req, headers):
452 def http_error_auth_reqed(self, auth_header, host, req, headers):
453 # Reset the retry counter once for each request.
453 # Reset the retry counter once for each request.
454 if req is not self.retried_req:
454 if req is not self.retried_req:
455 self.retried_req = req
455 self.retried_req = req
456 self.retried = 0
456 self.retried = 0
457 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
457 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
458 self, auth_header, host, req, headers)
458 self, auth_header, host, req, headers)
459
459
460 def retry_http_basic_auth(self, host, req, realm):
460 def retry_http_basic_auth(self, host, req, realm):
461 user, pw = self.passwd.find_user_password(realm, req.get_full_url())
461 user, pw = self.passwd.find_user_password(realm, req.get_full_url())
462 if pw is not None:
462 if pw is not None:
463 raw = "%s:%s" % (user, pw)
463 raw = "%s:%s" % (user, pw)
464 auth = 'Basic %s' % base64.b64encode(raw).strip()
464 auth = 'Basic %s' % base64.b64encode(raw).strip()
465 if req.headers.get(self.auth_header, None) == auth:
465 if req.headers.get(self.auth_header, None) == auth:
466 return None
466 return None
467 self.auth = auth
467 self.auth = auth
468 req.add_unredirected_header(self.auth_header, auth)
468 req.add_unredirected_header(self.auth_header, auth)
469 return self.parent.open(req)
469 return self.parent.open(req)
470 else:
470 else:
471 return None
471 return None
472
472
473 handlerfuncs = []
473 handlerfuncs = []
474
474
475 def opener(ui, authinfo=None):
475 def opener(ui, authinfo=None):
476 '''
476 '''
477 construct an opener suitable for urllib2
477 construct an opener suitable for urllib2
478 authinfo will be added to the password manager
478 authinfo will be added to the password manager
479 '''
479 '''
480 # experimental config: ui.usehttp2
480 # experimental config: ui.usehttp2
481 if ui.configbool('ui', 'usehttp2', False):
481 if ui.configbool('ui', 'usehttp2', False):
482 handlers = [
482 handlers = [
483 httpconnectionmod.http2handler(
483 httpconnectionmod.http2handler(
484 ui,
484 ui,
485 passwordmgr(ui, ui.httppasswordmgrdb))
485 passwordmgr(ui, ui.httppasswordmgrdb))
486 ]
486 ]
487 else:
487 else:
488 handlers = [httphandler()]
488 handlers = [httphandler()]
489 if has_https:
489 if has_https:
490 handlers.append(httpshandler(ui))
490 handlers.append(httpshandler(ui))
491
491
492 handlers.append(proxyhandler(ui))
492 handlers.append(proxyhandler(ui))
493
493
494 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
494 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
495 if authinfo is not None:
495 if authinfo is not None:
496 realm, uris, user, passwd = authinfo
496 realm, uris, user, passwd = authinfo
497 saveduser, savedpass = passmgr.find_stored_password(uris[0])
497 saveduser, savedpass = passmgr.find_stored_password(uris[0])
498 if user != saveduser or passwd:
498 if user != saveduser or passwd:
499 passmgr.add_password(realm, uris, user, passwd)
499 passmgr.add_password(realm, uris, user, passwd)
500 ui.debug('http auth: user %s, password %s\n' %
500 ui.debug('http auth: user %s, password %s\n' %
501 (user, passwd and '*' * len(passwd) or 'not set'))
501 (user, passwd and '*' * len(passwd) or 'not set'))
502
502
503 handlers.extend((httpbasicauthhandler(passmgr),
503 handlers.extend((httpbasicauthhandler(passmgr),
504 httpdigestauthhandler(passmgr)))
504 httpdigestauthhandler(passmgr)))
505 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
505 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
506 opener = urlreq.buildopener(*handlers)
506 opener = urlreq.buildopener(*handlers)
507
507
508 # 1.0 here is the _protocol_ version
508 # 1.0 here is the _protocol_ version
509 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
509 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
510 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
510 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
511 return opener
511 return opener
512
512
513 def open(ui, url_, data=None):
513 def open(ui, url_, data=None):
514 u = util.url(url_)
514 u = util.url(url_)
515 if u.scheme:
515 if u.scheme:
516 u.scheme = u.scheme.lower()
516 u.scheme = u.scheme.lower()
517 url_, authinfo = u.authinfo()
517 url_, authinfo = u.authinfo()
518 else:
518 else:
519 path = util.normpath(os.path.abspath(url_))
519 path = util.normpath(os.path.abspath(url_))
520 url_ = 'file://' + urlreq.pathname2url(path)
520 url_ = 'file://' + urlreq.pathname2url(path)
521 authinfo = None
521 authinfo = None
522 return opener(ui, authinfo).open(url_, data)
522 return opener(ui, authinfo).open(url_, data)
@@ -1,2856 +1,2857
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'httplib',
50 'pickle',
51 'pickle',
51 'queue',
52 'queue',
52 'urlerr',
53 'urlerr',
53 'urlparse',
54 'urlparse',
54 # we do import urlreq, but we do it outside the loop
55 # we do import urlreq, but we do it outside the loop
55 #'urlreq',
56 #'urlreq',
56 'stringio',
57 'stringio',
57 'socketserver',
58 'socketserver',
58 'xmlrpclib',
59 'xmlrpclib',
59 ):
60 ):
60 globals()[attr] = getattr(pycompat, attr)
61 globals()[attr] = getattr(pycompat, attr)
61
62
62 # This line is to make pyflakes happy:
63 # This line is to make pyflakes happy:
63 urlreq = pycompat.urlreq
64 urlreq = pycompat.urlreq
64
65
65 if os.name == 'nt':
66 if os.name == 'nt':
66 from . import windows as platform
67 from . import windows as platform
67 else:
68 else:
68 from . import posix as platform
69 from . import posix as platform
69
70
70 _ = i18n._
71 _ = i18n._
71
72
72 cachestat = platform.cachestat
73 cachestat = platform.cachestat
73 checkexec = platform.checkexec
74 checkexec = platform.checkexec
74 checklink = platform.checklink
75 checklink = platform.checklink
75 copymode = platform.copymode
76 copymode = platform.copymode
76 executablepath = platform.executablepath
77 executablepath = platform.executablepath
77 expandglobs = platform.expandglobs
78 expandglobs = platform.expandglobs
78 explainexit = platform.explainexit
79 explainexit = platform.explainexit
79 findexe = platform.findexe
80 findexe = platform.findexe
80 gethgcmd = platform.gethgcmd
81 gethgcmd = platform.gethgcmd
81 getuser = platform.getuser
82 getuser = platform.getuser
82 getpid = os.getpid
83 getpid = os.getpid
83 groupmembers = platform.groupmembers
84 groupmembers = platform.groupmembers
84 groupname = platform.groupname
85 groupname = platform.groupname
85 hidewindow = platform.hidewindow
86 hidewindow = platform.hidewindow
86 isexec = platform.isexec
87 isexec = platform.isexec
87 isowner = platform.isowner
88 isowner = platform.isowner
88 localpath = platform.localpath
89 localpath = platform.localpath
89 lookupreg = platform.lookupreg
90 lookupreg = platform.lookupreg
90 makedir = platform.makedir
91 makedir = platform.makedir
91 nlinks = platform.nlinks
92 nlinks = platform.nlinks
92 normpath = platform.normpath
93 normpath = platform.normpath
93 normcase = platform.normcase
94 normcase = platform.normcase
94 normcasespec = platform.normcasespec
95 normcasespec = platform.normcasespec
95 normcasefallback = platform.normcasefallback
96 normcasefallback = platform.normcasefallback
96 openhardlinks = platform.openhardlinks
97 openhardlinks = platform.openhardlinks
97 oslink = platform.oslink
98 oslink = platform.oslink
98 parsepatchoutput = platform.parsepatchoutput
99 parsepatchoutput = platform.parsepatchoutput
99 pconvert = platform.pconvert
100 pconvert = platform.pconvert
100 poll = platform.poll
101 poll = platform.poll
101 popen = platform.popen
102 popen = platform.popen
102 posixfile = platform.posixfile
103 posixfile = platform.posixfile
103 quotecommand = platform.quotecommand
104 quotecommand = platform.quotecommand
104 readpipe = platform.readpipe
105 readpipe = platform.readpipe
105 rename = platform.rename
106 rename = platform.rename
106 removedirs = platform.removedirs
107 removedirs = platform.removedirs
107 samedevice = platform.samedevice
108 samedevice = platform.samedevice
108 samefile = platform.samefile
109 samefile = platform.samefile
109 samestat = platform.samestat
110 samestat = platform.samestat
110 setbinary = platform.setbinary
111 setbinary = platform.setbinary
111 setflags = platform.setflags
112 setflags = platform.setflags
112 setsignalhandler = platform.setsignalhandler
113 setsignalhandler = platform.setsignalhandler
113 shellquote = platform.shellquote
114 shellquote = platform.shellquote
114 spawndetached = platform.spawndetached
115 spawndetached = platform.spawndetached
115 split = platform.split
116 split = platform.split
116 sshargs = platform.sshargs
117 sshargs = platform.sshargs
117 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
118 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
118 statisexec = platform.statisexec
119 statisexec = platform.statisexec
119 statislink = platform.statislink
120 statislink = platform.statislink
120 termwidth = platform.termwidth
121 termwidth = platform.termwidth
121 testpid = platform.testpid
122 testpid = platform.testpid
122 umask = platform.umask
123 umask = platform.umask
123 unlink = platform.unlink
124 unlink = platform.unlink
124 unlinkpath = platform.unlinkpath
125 unlinkpath = platform.unlinkpath
125 username = platform.username
126 username = platform.username
126
127
127 # Python compatibility
128 # Python compatibility
128
129
129 _notset = object()
130 _notset = object()
130
131
131 # disable Python's problematic floating point timestamps (issue4836)
132 # disable Python's problematic floating point timestamps (issue4836)
132 # (Python hypocritically says you shouldn't change this behavior in
133 # (Python hypocritically says you shouldn't change this behavior in
133 # libraries, and sure enough Mercurial is not a library.)
134 # libraries, and sure enough Mercurial is not a library.)
134 os.stat_float_times(False)
135 os.stat_float_times(False)
135
136
136 def safehasattr(thing, attr):
137 def safehasattr(thing, attr):
137 return getattr(thing, attr, _notset) is not _notset
138 return getattr(thing, attr, _notset) is not _notset
138
139
139 DIGESTS = {
140 DIGESTS = {
140 'md5': hashlib.md5,
141 'md5': hashlib.md5,
141 'sha1': hashlib.sha1,
142 'sha1': hashlib.sha1,
142 'sha512': hashlib.sha512,
143 'sha512': hashlib.sha512,
143 }
144 }
144 # List of digest types from strongest to weakest
145 # List of digest types from strongest to weakest
145 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
146 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
146
147
147 for k in DIGESTS_BY_STRENGTH:
148 for k in DIGESTS_BY_STRENGTH:
148 assert k in DIGESTS
149 assert k in DIGESTS
149
150
150 class digester(object):
151 class digester(object):
151 """helper to compute digests.
152 """helper to compute digests.
152
153
153 This helper can be used to compute one or more digests given their name.
154 This helper can be used to compute one or more digests given their name.
154
155
155 >>> d = digester(['md5', 'sha1'])
156 >>> d = digester(['md5', 'sha1'])
156 >>> d.update('foo')
157 >>> d.update('foo')
157 >>> [k for k in sorted(d)]
158 >>> [k for k in sorted(d)]
158 ['md5', 'sha1']
159 ['md5', 'sha1']
159 >>> d['md5']
160 >>> d['md5']
160 'acbd18db4cc2f85cedef654fccc4a4d8'
161 'acbd18db4cc2f85cedef654fccc4a4d8'
161 >>> d['sha1']
162 >>> d['sha1']
162 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
163 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
163 >>> digester.preferred(['md5', 'sha1'])
164 >>> digester.preferred(['md5', 'sha1'])
164 'sha1'
165 'sha1'
165 """
166 """
166
167
167 def __init__(self, digests, s=''):
168 def __init__(self, digests, s=''):
168 self._hashes = {}
169 self._hashes = {}
169 for k in digests:
170 for k in digests:
170 if k not in DIGESTS:
171 if k not in DIGESTS:
171 raise Abort(_('unknown digest type: %s') % k)
172 raise Abort(_('unknown digest type: %s') % k)
172 self._hashes[k] = DIGESTS[k]()
173 self._hashes[k] = DIGESTS[k]()
173 if s:
174 if s:
174 self.update(s)
175 self.update(s)
175
176
176 def update(self, data):
177 def update(self, data):
177 for h in self._hashes.values():
178 for h in self._hashes.values():
178 h.update(data)
179 h.update(data)
179
180
180 def __getitem__(self, key):
181 def __getitem__(self, key):
181 if key not in DIGESTS:
182 if key not in DIGESTS:
182 raise Abort(_('unknown digest type: %s') % k)
183 raise Abort(_('unknown digest type: %s') % k)
183 return self._hashes[key].hexdigest()
184 return self._hashes[key].hexdigest()
184
185
185 def __iter__(self):
186 def __iter__(self):
186 return iter(self._hashes)
187 return iter(self._hashes)
187
188
188 @staticmethod
189 @staticmethod
189 def preferred(supported):
190 def preferred(supported):
190 """returns the strongest digest type in both supported and DIGESTS."""
191 """returns the strongest digest type in both supported and DIGESTS."""
191
192
192 for k in DIGESTS_BY_STRENGTH:
193 for k in DIGESTS_BY_STRENGTH:
193 if k in supported:
194 if k in supported:
194 return k
195 return k
195 return None
196 return None
196
197
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # read from the wrapped handle while feeding the digester and
        # keeping a running byte count for validate()
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort when the byte count or any digest does not match."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            if expected != self._digester[name]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, self._digester[name]))
228
229
try:
    # keep the builtin when it exists (Python 2)
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # builtin missing on this Python 2: emulate with a copying slice
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3 removed buffer(); memoryview gives a zero-copy view
            return memoryview(sliceable)[offset:]
238
239
239 closefds = os.name == 'posix'
240 closefds = os.name == 'posix'
240
241
241 _chunksize = 4096
242 _chunksize = 4096
242
243
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of pending chunks
        self._eof = False
        self._lenbuf = 0        # total length of chunks in self._buffer

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # pull data until the request can be satisfied or EOF is hit
        while self._lenbuf < size and not self._eof:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while lfi < 0 and not self._eof:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        rest = buf[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if data:
            self._lenbuf += len(data)
            self._buffer.append(data)
        else:
            self._eof = True
336
337
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through a shell; return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
347
348
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object and return only the pipes."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
351
352
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through a shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
360
361
def version():
    """Return version information if available."""
    try:
        # __version__ is generated at build time; it may be absent in a
        # source checkout, in which case we report 'unknown'
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'
368
369
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is the "extra" local-build part
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
421
422
# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)
449
450
# coarser-grained formats accepted in addition to defaultdateformats
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
456
457
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # a one-element list serves as a cheap mutable cell for the result
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
482
483
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration order is insertion order; re-setting an existing key moves
    it to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # return the popped value per the dict contract (the result of
        # dict.pop was previously discarded, so pop() always gave None)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default swallowed the KeyError
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
527
528
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None
        # _notset is the sentinel for "this node holds no entry"
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
546
547
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # key -> _lrucachenode mapping
        self._cache = {}

        # circular doubly linked list; starts with a single empty node
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` when missing.

        This previously returned the internal ``_lrucachenode`` wrapper
        instead of the stored value.
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every node, then drop the key mapping; the ring itself
        # (and thus the allocated capacity) is kept
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
705
706
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # refresh: move the key to the most-recent end
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                # refresh: move the key tuple to the most-recent end
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
732
733
class propertycache(object):
    """Descriptor computing an attribute lazily, once per instance.

    On first access the wrapped function runs and its result is stored in
    the instance __dict__, shadowing the descriptor for later accesses.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
745
746
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
752
753
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # OpenVMS encodes success in the low bit of the exit code
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
786
787
# maps a filter-command prefix to its implementation; commands without a
# recognized prefix fall through to pipefilter
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
791
792
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
798
799
def binary(s):
    """return true if a string is binary data"""
    # empty or None input is never considered binary; otherwise the
    # presence of a NUL byte marks the string as binary
    if not s:
        return False
    return '\0' in s
802
803
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def highestbit(value):
        # position of the highest set bit; 0 for a zero input
        if not value:
            return 0
        count = 0
        while value:
            value >>= 1
            count += 1
        return count - 1

    pending = []
    pendingsize = 0
    for piece in source:
        pending.append(piece)
        pendingsize += len(piece)
        if pendingsize >= min:
            if min < max:
                # double the threshold, but jump ahead if the buffered
                # amount already exceeds the doubled value; cap at max
                min = min << 1
                nmin = 1 << highestbit(pendingsize)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendingsize = 0
            pending = []
    if pending:
        # flush whatever is left, even if smaller than min
        yield ''.join(pending)
833
834
# convenience alias: callers throughout the codebase raise util.Abort
Abort = error.Abort
835
836
def always(fn):
    """constant predicate: report a match for any input"""
    return True
838
839
def never(fn):
    """constant predicate: report no match for any input"""
    return False
841
842
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def inner(*args, **kwargs):
        # remember whether collection was on so we only re-enable what
        # we actually turned off
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return inner
863
864
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # cross-drive on Windows: no relative path exists, fall back to
        # an absolute path under root
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # skip over the shared leading components of both paths
    common = 0
    while (common < len(fromparts) and common < len(toparts)
           and fromparts[common] == toparts[common]):
        common += 1
    # climb out of the remaining source components, then descend into
    # the remaining target components
    ups = ['..'] * (len(fromparts) - common)
    return os.sep.join(ups + toparts[common:]) or '.'
889
890
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used). A truthy value means the interpreter was
    bundled into a standalone binary rather than run from source.
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
899
900
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    # running from source (or a py2app bundle): data lives beside this module
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; resolved lazily by hgexecutable()
_hgexecutable = None
910
911
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable; candidates are tried in order: $HG, the frozen
    executable itself, the __main__ script named 'hg', then a search
    of the system path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # frozen binary: the interpreter *is* hg
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
933
934
def _sethgexecutable(path):
    """set location of the 'hg' executable (cached in _hgexecutable)"""
    global _hgexecutable
    _hgexecutable = path
938
939
939 def _isstdout(f):
940 def _isstdout(f):
940 fileno = getattr(f, 'fileno', None)
941 fileno = getattr(f, 'fileno', None)
941 return fileno and fileno() == sys.__stdout__.fileno()
942 return fileno and fileno() == sys.__stdout__.fileno()
942
943
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our stdout so child output is not interleaved out of order
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # let child processes locate the running hg via $HG
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # NOTE(review): closefds is presumably a module-level platform
            # constant defined elsewhere in this file -- confirm
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # out is a custom sink: capture the child's combined
            # stdout/stderr and copy it line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        # on OpenVMS an odd status means success
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1001
1002
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError came from the
            # call itself (bad signature), not from inside func
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise

    return wrapped
1013
1014
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the replaced file's stat so mtime ambiguity with
            # the new copy can be detected below
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one:
                    # advance mtime by one second (wrapped to 31 bits) so
                    # the copy is distinguishable from what it replaced
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1058
1059
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was (still)
    in effect at the end, and how many files were processed. progress
    is called with a topic and a running count, and with None to close.
    """
    num = 0

    if hardlink is None:
        # hardlinks only work within one filesystem; compare devices
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files already done here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed: degrade to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1095
1096
# basenames and characters that are invalid in Windows filenames;
# consulted by checkwinfilename() below
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # check each path component separately
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0-31) are forbidden in Windows names
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        # (e.g. "con.xml" is still reserved)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # note: "n not in '..'" is a substring test, so it exempts
        # exactly the '.' and '..' path components
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1146
1147
# pick the filename validator for the current OS: the Windows rules when
# running on Windows, otherwise whatever the platform module provides
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1151
1152
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    A symlink whose target is info is tried first; if symlinks are
    unavailable, or creation fails for any reason other than the lock
    already existing, fall back to an exclusively-created regular file
    containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1164
1165
def readlock(pathname):
    """Return the content of the lock at pathname.

    Reads the symlink target when the lock is a symlink; otherwise (no
    symlink support, or the lock is a plain file) reads the file's
    contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1177
1178
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available: stat by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
1184
1185
1185 # File system features
1186 # File system features
1186
1187
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, basename = os.path.split(path)
    # fold the last component to the opposite case; if neither upper nor
    # lower changes it, the name carries no case information
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded twin does not exist: filesystem distinguishes case
        return True
    # identical stat means both spellings name the same file (insensitive)
    return st1 != st2
1209
1210
# tri-state availability flag for the optional re2 engine:
# None = importable but not yet verified, False = unavailable;
# _re._checkre2() resolves None to a real boolean on first use
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1215
1216
class _re(object):
    # facade over the stdlib re module (imported as remod) that transparently
    # uses google's re2 engine when it is installed and working

    def _checkre2(self):
        """probe re2 once and cache the result in the module-level _re2"""
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline, so translate the supported ones
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2: fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton: callers use util.re.compile / util.re.escape
re = _re()
1260
1261
# per-directory cache of {normcased name: on-disk name} listings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each normcased entry of dir to its actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string; the previous code discarded the
    # result, leaving '\' unescaped inside the character class on Windows)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased form if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1303
1304
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # a working filesystem reports 2 links after the oslink above
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the probe files, ignoring races
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1335
1336
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: when os.altsep is None this evaluates to None (falsy),
    # exactly as the boolean short-circuit form would
    return os.altsep and path.endswith(os.altsep)
1339
1340
def splitpath(path):
    '''Split path into its components at each os.sep.

    os.altsep is deliberately ignored: this helper is only a named
    spelling of "path.split(os.sep)". Run os.path.normpath() on the
    input first when normalization is needed.'''
    return path.split(os.sep)
1347
1348
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    # Mac-specific checks below.
    if 'SSH_CONNECTION' in os.environ:
        # SSH access to a box where the user is also logged in locally
        return False
    if getattr(osutil, 'isgui', None):
        # C extension available: ask whether a CoreGraphics session exists
        return osutil.isgui()
    # pure build; use a safe default
    return True
1362
1363
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory as the original so a
    # later rename over the original stays on the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will truncate anyway; skip the content copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy: the empty temp file is correct
                return temp
            # annotate the exception with the file name if the OS didn't
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stale temp file behind on any failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1401
1402
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # missing file: record that explicitly rather than raising
            self.stat = None

    # instances hash by identity, like plain objects
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # provided ambiguity between the two stats was avoided (see
            # isambig), matching size, ctime and mtime is enough to
            # detect change of a file regardless of platform
            mine, theirs = self.stat, old.stat
            return (mine.st_size == theirs.st_size and
                    mine.st_ctime == theirs.st_ctime and
                    mine.st_mtime == theirs.st_mtime)
        except AttributeError:
            # one side has no stat (missing file): never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats are "ambiguous" when a file changed twice or more
        within the same ctime second: size/ctime/mtime comparison may
        then miss a change. Natural clock advance guarantees
        old.ctime < new.ctime for any detectable change, so every pair
        with equal ctime must be treated as ambiguous; including mtime
        in the test would be defeated by mtimes that were themselves
        manually advanced for earlier ambiguous files. Callers react to
        True by advancing mtime one second, which ensures
        old.mtime != new.mtime even when the size did not change.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
1467
1468
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # temp copy lives next to the target; created empty for 'w' modes
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: flush the temp file and rename it over the target
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only stat the old file when ambiguity checking is enabled
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    # advance mtime one second so cached stats differ
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # abort: drop the temp file without touching the target
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1530
1531
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST:
            # already present: never chmod a pre-existing directory
            return
        if inst.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; nothing left to create
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as inst:
            # Catch EEXIST to handle races
            if inst.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1558
1559
def readfile(path):
    '''Return the entire binary content of path.'''
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1562
1563
def writefile(path, text):
    '''Overwrite path with the binary content text.'''
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1566
1567
def appendfile(path, text):
    '''Append the binary content text to path, creating it if needed.'''
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1570
1571
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chop any chunk over 1MB into 256kB pieces
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                    continue
                pos = 0
                while pos < len(chunk):
                    end = pos + 2 ** 18
                    yield chunk[pos:end]
                    pos = end
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            # refill the queue with roughly 256kB of chunks
            if not queue:
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break

            # Peek instead of popleft()/appendleft(): for partial reads
            # that pattern would cost two deque mutations plus a new str
            # for the unread remainder. Tracking _chunkoffset avoids both.
            chunk = queue[0]
            chunklen = len(chunk)
            offset = self._chunkoffset

            # Fast path: consume an entire untouched chunk.
            if offset == 0 and remaining >= chunklen:
                remaining -= chunklen
                queue.popleft()
                pieces.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            unread = chunklen - offset

            # Consume the rest of a partially-read chunk.
            if remaining >= unread:
                remaining -= unread
                queue.popleft()
                # offset == 0 was handled by the fast path above, so this
                # slice never degenerates into a full copy via chunk[0:]
                pieces.append(chunk[offset:])
                self._chunkoffset = 0

            # Take only part of the chunk; remember where we stopped.
            else:
                pieces.append(chunk[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread

        return ''.join(pieces)
1651
1652
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # the limit has been exhausted
            break
        data = f.read(nbytes)
        if not data:
            # end of file (or socket closed)
            break
        if limit:
            limit -= len(data)
        yield data
1672
1673
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local UTC offset = UTC wall clock minus local wall clock at that
    # instant (sub-second parts of the delta are deliberately dropped)
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    skew = utcwall - localwall
    tz = skew.days * 86400 + skew.seconds
    return timestamp, tz
1685
1686
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand our timezone escapes: %1 = signed hours, %2 = minutes,
        # %z = both together
        sign = "-" if tz > 0 else "+"
        offhours, offminutes = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, offhours))
        format = format.replace("%2", "%02d" % offminutes)
    secs = t - tz
    # clamp into signed 32 bits; values outside it cannot be rendered
    if secs > 0x7fffffff:
        secs = 0x7fffffff
    elif secs < -0x80000000:
        secs = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=secs)
    return when.strftime(format)
1721
1722
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    isoformat = '%Y-%m-%d'
    return datestr(date, format=isoformat)
1725
1726
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets such as "+0200" or "-0430" plus the literal
    names "GMT" and "UTC". The returned integer is the offset in
    seconds to add to a local time interpreted as UTC to obtain the
    real unixtime (so zones east of UTC yield negative values).
    Returns None when the string is not a recognized timezone.
    """
    # check the length first: the original order indexed tz[0] before
    # checking len(tz), which raised IndexError on an empty string
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1736
1737
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # NOTE(review): defaults is indexed by string keys below, so the []
    # default only works when callers pass a mapping - parsedate() always
    # passes a dict; confirm no other caller relies on the default
    #
    # the last whitespace-separated token may be a timezone; if it parses
    # as one, strip it from the date text before strptime sees it
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # defaults[part] is a (biased, today-based) pair; usenow
            # selects which one to append
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    # timegm() treats the tuple as UTC, giving "local time as unixtime"
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone: derive the offset from mktime's local reading
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1766
1767
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else
        # triggers only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1843
1844
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # resolve a partial date to the earliest timestamp it could
        # mean: unspecified month/day default to "1"
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # resolve a partial date to the latest timestamp it could mean:
        # unspecified fields default to their maxima; month lengths are
        # tried from 31 downward because parsedate aborts on an invalid
        # day-of-month for the given month/year
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # '<DATE': on or before DATE (compare against its upper bound)
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # '>DATE': on or after DATE (compare against its lower bound)
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days from now
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # 'DATE to DATE': inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # plain date: match anywhere within the range it implies
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1919
1920
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    # regex pattern: strip the prefix and compile eagerly so a malformed
    # expression is reported here rather than at first use
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    # explicit literal: strip the prefix; anything else falls through
    # and is matched literally as-is
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1958
1959
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop any domain part ("joe@example.com" -> "joe")
    at = user.find('@')
    if at != -1:
        user = user[:at]
    # for "Name <address" forms, keep only what follows '<'
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    # finally stop at the first space, then at the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos != -1:
            user = user[:pos]
    return user
1974
1975
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, if any
    user = user.split('@', 1)[0]
    # for "Name <local" forms, keep only what follows the first '<'
    head, bracket, tail = user.partition('<')
    if bracket:
        return tail
    return head
1984
1985
def email(author):
    '''get email of author.'''
    # take everything between '<' and '>'; with no '<' this degrades to
    # the whole string (find returns -1, so the slice starts at 0), and
    # with no '>' it runs to the end
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1991
1992
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # '...' is passed to encoding.trim as the truncation marker; the
    # column accounting itself lives in encoding.trim
    return encoding.trim(text, maxlength, ellipsis='...')
1995
1996
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) triples,
    tried in order: the first entry whose threshold (multiplier *
    divisor) does not exceed the count is used, dividing the count by
    the entry's divisor.  The last entry's format is the fallback.
    '''

    def render(count):
        for threshold_mult, divisor, fmt in unittable:
            if count >= threshold_mult * divisor:
                return fmt % (count / float(divisor))
        # nothing matched (count below every threshold): use the last
        # entry's format on the raw count
        return unittable[-1][2] % count

    return render
2006
2007
# render a byte count as a human-readable string; entries are ordered
# largest threshold first, and the precision varies with magnitude
# (see unitcountfn for the matching rule)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2019
2020
def uirepr(s):
    """repr() a string for display in the UI.

    repr() doubles every backslash; collapse those back so Windows
    paths read naturally.
    """
    rep = repr(s)
    return rep.replace('\\\\', '\\')
2023
2024
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory that builds (once) a width-aware TextWrapper subclass and
    # returns an instance configured with **kwargs.  On first call it
    # rebinds the module-level name MBTextWrapper to the class itself
    # (see the 'global' at the bottom), so later calls construct
    # instances directly without re-creating the class.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr after the longest prefix that still fits in
            # space_left display columns, per encoding.ucolwidth
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # a chunk too wide for any line: either break it at the
            # column boundary, or (if breaking is disabled) emit it
            # whole on an otherwise-empty line
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class: replace this factory with the class so the body
    # above runs only once per process
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2127
2128
def wrap(line, width, initindent='', hangindent=''):
    """Wrap 'line' to at most 'width' display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one.  Inputs are decoded with the local encoding so the
    wrapping is column-aware (see MBTextWrapper), and the result is
    re-encoded before being returned.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)

    def _decode(s):
        return s.decode(encoding.encoding, encoding.encodingmode)

    text = _decode(line)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=_decode(initindent),
                            subsequent_indent=_decode(hangindent))
    return wrapper.fill(text).encode(encoding.encoding)
2140
2141
def iterlines(iterator):
    """Yield each text line from an iterable of multi-line chunks."""
    for chunk in iterator:
        for text_line in chunk.splitlines():
            yield text_line
2145
2146
def expandpath(path):
    """Expand environment variables, then a leading ~user, in path."""
    return os.path.expanduser(os.path.expandvars(path))
2148
2149
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2163
2164
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' can never match a bare pid; death
            # detection effectively relies on testpid() alone -- confirm
            # and consider storing os.wait()[0] instead.
            # condfn() is re-checked after a detected death to avoid a
            # race where the child succeeded just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2198
2199
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    # NOTE: mapping keys are inserted into the regex verbatim; callers
    # are expected to pass regex-safe keys
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so adding the escape entry does not mutate the
        # caller's dictionary as a side effect
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2223
2224
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2240
2241
# recognized spellings of boolean config values
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2251
2252
2252 _hexdig = '0123456789ABCDEFabcdef'
2253 _hexdig = '0123456789ABCDEFabcdef'
2253 _hextochr = dict((a + b, chr(int(a + b, 16)))
2254 _hextochr = dict((a + b, chr(int(a + b, 16)))
2254 for a in _hexdig for b in _hexdig)
2255 for a in _hexdig for b in _hexdig)
2255
2256
2256 def _urlunquote(s):
2257 def _urlunquote(s):
2257 """Decode HTTP/HTML % encoding.
2258 """Decode HTTP/HTML % encoding.
2258
2259
2259 >>> _urlunquote('abc%20def')
2260 >>> _urlunquote('abc%20def')
2260 'abc def'
2261 'abc def'
2261 """
2262 """
2262 res = s.split('%')
2263 res = s.split('%')
2263 # fastpath
2264 # fastpath
2264 if len(res) == 1:
2265 if len(res) == 1:
2265 return s
2266 return s
2266 s = res[0]
2267 s = res[0]
2267 for item in res[1:]:
2268 for item in res[1:]:
2268 try:
2269 try:
2269 s += _hextochr[item[:2]] + item[2:]
2270 s += _hextochr[item[:2]] + item[2:]
2270 except KeyError:
2271 except KeyError:
2271 s += '%' + item
2272 s += '%' + item
2272 except UnicodeDecodeError:
2273 except UnicodeDecodeError:
2273 s += unichr(int(item[:2], 16)) + item[2:]
2274 s += unichr(int(item[:2], 16)) + item[2:]
2274 return s
2275 return s
2275
2276
2276 class url(object):
2277 class url(object):
2277 r"""Reliable URL parser.
2278 r"""Reliable URL parser.
2278
2279
2279 This parses URLs and provides attributes for the following
2280 This parses URLs and provides attributes for the following
2280 components:
2281 components:
2281
2282
2282 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2283 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2283
2284
2284 Missing components are set to None. The only exception is
2285 Missing components are set to None. The only exception is
2285 fragment, which is set to '' if present but empty.
2286 fragment, which is set to '' if present but empty.
2286
2287
2287 If parsefragment is False, fragment is included in query. If
2288 If parsefragment is False, fragment is included in query. If
2288 parsequery is False, query is included in path. If both are
2289 parsequery is False, query is included in path. If both are
2289 False, both fragment and query are included in path.
2290 False, both fragment and query are included in path.
2290
2291
2291 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2292 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2292
2293
2293 Note that for backward compatibility reasons, bundle URLs do not
2294 Note that for backward compatibility reasons, bundle URLs do not
2294 take host names. That means 'bundle://../' has a path of '../'.
2295 take host names. That means 'bundle://../' has a path of '../'.
2295
2296
2296 Examples:
2297 Examples:
2297
2298
2298 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2299 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2299 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2300 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2300 >>> url('ssh://[::1]:2200//home/joe/repo')
2301 >>> url('ssh://[::1]:2200//home/joe/repo')
2301 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2302 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2302 >>> url('file:///home/joe/repo')
2303 >>> url('file:///home/joe/repo')
2303 <url scheme: 'file', path: '/home/joe/repo'>
2304 <url scheme: 'file', path: '/home/joe/repo'>
2304 >>> url('file:///c:/temp/foo/')
2305 >>> url('file:///c:/temp/foo/')
2305 <url scheme: 'file', path: 'c:/temp/foo/'>
2306 <url scheme: 'file', path: 'c:/temp/foo/'>
2306 >>> url('bundle:foo')
2307 >>> url('bundle:foo')
2307 <url scheme: 'bundle', path: 'foo'>
2308 <url scheme: 'bundle', path: 'foo'>
2308 >>> url('bundle://../foo')
2309 >>> url('bundle://../foo')
2309 <url scheme: 'bundle', path: '../foo'>
2310 <url scheme: 'bundle', path: '../foo'>
2310 >>> url(r'c:\foo\bar')
2311 >>> url(r'c:\foo\bar')
2311 <url path: 'c:\\foo\\bar'>
2312 <url path: 'c:\\foo\\bar'>
2312 >>> url(r'\\blah\blah\blah')
2313 >>> url(r'\\blah\blah\blah')
2313 <url path: '\\\\blah\\blah\\blah'>
2314 <url path: '\\\\blah\\blah\\blah'>
2314 >>> url(r'\\blah\blah\blah#baz')
2315 >>> url(r'\\blah\blah\blah#baz')
2315 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2316 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2316 >>> url(r'file:///C:\users\me')
2317 >>> url(r'file:///C:\users\me')
2317 <url scheme: 'file', path: 'C:\\users\\me'>
2318 <url scheme: 'file', path: 'C:\\users\\me'>
2318
2319
2319 Authentication credentials:
2320 Authentication credentials:
2320
2321
2321 >>> url('ssh://joe:xyz@x/repo')
2322 >>> url('ssh://joe:xyz@x/repo')
2322 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2323 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2323 >>> url('ssh://joe@x/repo')
2324 >>> url('ssh://joe@x/repo')
2324 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2325 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2325
2326
2326 Query strings and fragments:
2327 Query strings and fragments:
2327
2328
2328 >>> url('http://host/a?b#c')
2329 >>> url('http://host/a?b#c')
2329 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2330 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2330 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2331 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2331 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2332 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2332 """
2333 """
2333
2334
    # Characters left unescaped when quoting user/password components.
    _safechars = "!~*'()+"
    # Characters left unescaped when quoting path/fragment components
    # (the user-level set plus '/', ':' and '\').
    _safepchars = "/!~*'()+:\\"
    # Matches a leading "scheme:" prefix (letters, digits, '+', '.', '-').
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2338
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into scheme/user/passwd/host/port/path/query/fragment.

        parsequery and parsefragment control whether '?' and '#' are
        treated as component separators or left inside the path.  Raises
        Abort for file:// URLs that name a non-local host.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True   # no scheme seen yet -> plain local path
        self._hostport = ''      # original host:port text, for localpath()
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # scheme-less input: everything is the path, done.
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            # rsplit so '@' may appear inside the password.
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
2431
2432
2432 def __repr__(self):
2433 def __repr__(self):
2433 attrs = []
2434 attrs = []
2434 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2435 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2435 'query', 'fragment'):
2436 'query', 'fragment'):
2436 v = getattr(self, a)
2437 v = getattr(self, a)
2437 if v is not None:
2438 if v is not None:
2438 attrs.append('%s: %r' % (a, v))
2439 attrs.append('%s: %r' % (a, v))
2439 return '<url %s>' % ', '.join(attrs)
2440 return '<url %s>' % ', '.join(attrs)
2440
2441
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            # Local paths round-trip almost verbatim; only the bundle
            # scheme and a fragment need re-attaching.
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # host-less absolute URL, e.g. file:///tmp/x; drive letters
            # get an extra '/' so file:///c:/... round-trips.
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # Bracketed IPv6 literals must not be percent-quoted.
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s
2517
2518
2518 def authinfo(self):
2519 def authinfo(self):
2519 user, passwd = self.user, self.passwd
2520 user, passwd = self.user, self.passwd
2520 try:
2521 try:
2521 self.user, self.passwd = None, None
2522 self.user, self.passwd = None, None
2522 s = str(self)
2523 s = str(self)
2523 finally:
2524 finally:
2524 self.user, self.passwd = user, passwd
2525 self.user, self.passwd = user, passwd
2525 if not self.user:
2526 if not self.user:
2526 return (s, None)
2527 return (s, None)
2527 # authinfo[1] is passed to urllib2 password manager, and its
2528 # authinfo[1] is passed to urllib2 password manager, and its
2528 # URIs must not contain credentials. The host is passed in the
2529 # URIs must not contain credentials. The host is passed in the
2529 # URIs list because Python < 2.4.3 uses only that to search for
2530 # URIs list because Python < 2.4.3 uses only that to search for
2530 # a password.
2531 # a password.
2531 return (s, (None, (s, self.host),
2532 return (s, (None, (s, self.host),
2532 self.user, self.passwd or ''))
2533 self.user, self.passwd or ''))
2533
2534
2534 def isabs(self):
2535 def isabs(self):
2535 if self.scheme and self.scheme != 'file':
2536 if self.scheme and self.scheme != 'file':
2536 return True # remote URL
2537 return True # remote URL
2537 if hasdriveletter(self.path):
2538 if hasdriveletter(self.path):
2538 return True # absolute for our purposes - can't be joined()
2539 return True # absolute for our purposes - can't be joined()
2539 if self.path.startswith(r'\\'):
2540 if self.path.startswith(r'\\'):
2540 return True # Windows UNC path
2541 return True # Windows UNC path
2541 if self.path.startswith('/'):
2542 if self.path.startswith('/'):
2542 return True # POSIX-style
2543 return True # POSIX-style
2543 return False
2544 return False
2544
2545
2545 def localpath(self):
2546 def localpath(self):
2546 if self.scheme == 'file' or self.scheme == 'bundle':
2547 if self.scheme == 'file' or self.scheme == 'bundle':
2547 path = self.path or '/'
2548 path = self.path or '/'
2548 # For Windows, we need to promote hosts containing drive
2549 # For Windows, we need to promote hosts containing drive
2549 # letters to paths with drive letters.
2550 # letters to paths with drive letters.
2550 if hasdriveletter(self._hostport):
2551 if hasdriveletter(self._hostport):
2551 path = self._hostport + '/' + self.path
2552 path = self._hostport + '/' + self.path
2552 elif (self.host is not None and self.path
2553 elif (self.host is not None and self.path
2553 and not hasdriveletter(path)):
2554 and not hasdriveletter(path)):
2554 path = '/' + path
2555 path = '/' + path
2555 return path
2556 return path
2556 return self._origpath
2557 return self._origpath
2557
2558
2558 def islocal(self):
2559 def islocal(self):
2559 '''whether localpath will return something that posixfile can open'''
2560 '''whether localpath will return something that posixfile can open'''
2560 return (not self.scheme or self.scheme == 'file'
2561 return (not self.scheme or self.scheme == 'file'
2561 or self.scheme == 'bundle')
2562 or self.scheme == 'bundle')
2562
2563
def hasscheme(path):
    """Report whether *path* parses with an explicit URL scheme."""
    parsed = url(path)
    return parsed.scheme is not None and parsed.scheme != ''
2565
2566
def hasdriveletter(path):
    """True when *path* starts with a Windows drive letter ('c:...').

    Falsy inputs ('', None) are returned unchanged, mirroring the
    short-circuit behavior callers rely on.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2568
2569
def urllocalpath(path):
    """Convert *path* to a local filesystem path, treating '?' and '#'
    as literal path characters rather than query/fragment separators."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2571
2572
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2578
2579
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2584
2585
def isatty(fp):
    """Best-effort fp.isatty(): False for objects without the method."""
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2590
2591
# Human-readable formatter for a duration in seconds, built with the
# project's unitcountfn helper.  Rows are (factor, divisor, format) and
# appear to be tried in order from coarsest (whole seconds) to finest
# (nanoseconds) so the first matching precision wins — unitcountfn is
# defined elsewhere in this file; confirm the selection rule there.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Current indentation depth for nested @timed reports; a one-element
# list so the nested wrapper closures can mutate it in place.
_timenesting = [0]
2608
2609
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    step = 2  # columns of indentation added per nesting level

    def wrapper(*args, **kwargs):
        begin = time.time()
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            # Report even when func raised; deeper @timed calls have
            # already printed, so indent by the restored nesting level.
            duration = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2633
2634
# Size suffixes and their multipliers.  Order matters for matching:
# the bare 'b' suffix must come after 'kb'/'mb'/'gb' so that '2kb'
# is not matched as '2k' + 'b'.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # No recognized suffix: the whole string is a plain integer.
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2655
2656
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        """Register *hook* under the name *source*."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Invoke every hook with *args*; return their results in
        source-name order."""
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2673
2674
2674 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2675 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2675 '''Yields lines for a nicely formatted stacktrace.
2676 '''Yields lines for a nicely formatted stacktrace.
2676 Skips the 'skip' last entries.
2677 Skips the 'skip' last entries.
2677 Each file+linenumber is formatted according to fileline.
2678 Each file+linenumber is formatted according to fileline.
2678 Each line is formatted according to line.
2679 Each line is formatted according to line.
2679 If line is None, it yields:
2680 If line is None, it yields:
2680 length of longest filepath+line number,
2681 length of longest filepath+line number,
2681 filepath+linenumber,
2682 filepath+linenumber,
2682 function
2683 function
2683
2684
2684 Not be used in production code but very convenient while developing.
2685 Not be used in production code but very convenient while developing.
2685 '''
2686 '''
2686 entries = [(fileline % (fn, ln), func)
2687 entries = [(fileline % (fn, ln), func)
2687 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2688 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2688 if entries:
2689 if entries:
2689 fnmax = max(len(entry[0]) for entry in entries)
2690 fnmax = max(len(entry[0]) for entry in entries)
2690 for fnln, func in entries:
2691 for fnln, func in entries:
2691 if line is None:
2692 if line is None:
2692 yield (fnmax, fnln, func)
2693 yield (fnmax, fnln, func)
2693 else:
2694 else:
2694 yield line % (fnmax, fnln, func)
2695 yield line % (fnmax, fnln, func)
2695
2696
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this helper's own frame.
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2708
2709
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # _dirs maps directory name -> number of entries (direct or
        # indirect) underneath it.
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: s[0] appears to be the entry's
            # state byte; entries in state `skip` are not counted —
            # confirm against dirstate callers.
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # plain iterable of paths (e.g. a manifest)
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # Invariant: if a directory is already counted, all of
                # its ancestors are too — bump only this one and stop.
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # Mirror of addpath: a still-referenced directory keeps
                # its ancestors referenced — decrement this one and stop.
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2744
2745
# Prefer the C implementation from the parsers extension when available;
# it shadows the pure-Python dirs class defined above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2747
2748
def finddirs(path):
    '''Yield each ancestor directory of *path*, deepest first
    ('a/b/c' -> 'a/b', then 'a').  A path with no '/' yields nothing.'''
    prefix = path
    while True:
        cut = prefix.rfind('/')
        if cut == -1:
            return
        prefix = prefix[:cut]
        yield prefix
2753
2754
2754 # compression utility
2755 # compression utility
2755
2756
class nocompress(object):
    """Identity 'compressor' used when no compression was requested.

    Mimics the zlib compressobj interface (compress/flush)."""

    def compress(self, x):
        # pass the data through untouched
        return x

    def flush(self):
        # nothing is buffered, so finishing the stream emits nothing
        return ""
2761
2762
# Map compression type name -> factory returning a fresh compressor
# object exposing compress()/flush() (zlib-style interface).
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2770
2771
def _makedecompressor(decompcls):
    """Wrap decompressor factory *decompcls* into a function mapping a
    file object to a chunkbuffer of decompressed data."""
    def gen(f):
        # one decompressor instance per stream
        engine = decompcls()
        for piece in filechunkiter(f):
            yield engine.decompress(piece)
    def reader(fh):
        return chunkbuffer(gen(fh))
    return reader
2779
2780
2780 class ctxmanager(object):
2781 class ctxmanager(object):
2781 '''A context manager for use in 'with' blocks to allow multiple
2782 '''A context manager for use in 'with' blocks to allow multiple
2782 contexts to be entered at once. This is both safer and more
2783 contexts to be entered at once. This is both safer and more
2783 flexible than contextlib.nested.
2784 flexible than contextlib.nested.
2784
2785
2785 Once Mercurial supports Python 2.7+, this will become mostly
2786 Once Mercurial supports Python 2.7+, this will become mostly
2786 unnecessary.
2787 unnecessary.
2787 '''
2788 '''
2788
2789
2789 def __init__(self, *args):
2790 def __init__(self, *args):
2790 '''Accepts a list of no-argument functions that return context
2791 '''Accepts a list of no-argument functions that return context
2791 managers. These will be invoked at __call__ time.'''
2792 managers. These will be invoked at __call__ time.'''
2792 self._pending = args
2793 self._pending = args
2793 self._atexit = []
2794 self._atexit = []
2794
2795
2795 def __enter__(self):
2796 def __enter__(self):
2796 return self
2797 return self
2797
2798
2798 def enter(self):
2799 def enter(self):
2799 '''Create and enter context managers in the order in which they were
2800 '''Create and enter context managers in the order in which they were
2800 passed to the constructor.'''
2801 passed to the constructor.'''
2801 values = []
2802 values = []
2802 for func in self._pending:
2803 for func in self._pending:
2803 obj = func()
2804 obj = func()
2804 values.append(obj.__enter__())
2805 values.append(obj.__enter__())
2805 self._atexit.append(obj.__exit__)
2806 self._atexit.append(obj.__exit__)
2806 del self._pending
2807 del self._pending
2807 return values
2808 return values
2808
2809
2809 def atexit(self, func, *args, **kwargs):
2810 def atexit(self, func, *args, **kwargs):
2810 '''Add a function to call when this context manager exits. The
2811 '''Add a function to call when this context manager exits. The
2811 ordering of multiple atexit calls is unspecified, save that
2812 ordering of multiple atexit calls is unspecified, save that
2812 they will happen before any __exit__ functions.'''
2813 they will happen before any __exit__ functions.'''
2813 def wrapper(exc_type, exc_val, exc_tb):
2814 def wrapper(exc_type, exc_val, exc_tb):
2814 func(*args, **kwargs)
2815 func(*args, **kwargs)
2815 self._atexit.append(wrapper)
2816 self._atexit.append(wrapper)
2816 return func
2817 return func
2817
2818
2818 def __exit__(self, exc_type, exc_val, exc_tb):
2819 def __exit__(self, exc_type, exc_val, exc_tb):
2819 '''Context managers are exited in the reverse order from which
2820 '''Context managers are exited in the reverse order from which
2820 they were created.'''
2821 they were created.'''
2821 received = exc_type is not None
2822 received = exc_type is not None
2822 suppressed = False
2823 suppressed = False
2823 pending = None
2824 pending = None
2824 self._atexit.reverse()
2825 self._atexit.reverse()
2825 for exitfunc in self._atexit:
2826 for exitfunc in self._atexit:
2826 try:
2827 try:
2827 if exitfunc(exc_type, exc_val, exc_tb):
2828 if exitfunc(exc_type, exc_val, exc_tb):
2828 suppressed = True
2829 suppressed = True
2829 exc_type = None
2830 exc_type = None
2830 exc_val = None
2831 exc_val = None
2831 exc_tb = None
2832 exc_tb = None
2832 except BaseException:
2833 except BaseException:
2833 pending = sys.exc_info()
2834 pending = sys.exc_info()
2834 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2835 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2835 del self._atexit
2836 del self._atexit
2836 if pending:
2837 if pending:
2837 raise exc_val
2838 raise exc_val
2838 return received and suppressed
2839 return received and suppressed
2839
2840
2840 def _bz2():
2841 def _bz2():
2841 d = bz2.BZ2Decompressor()
2842 d = bz2.BZ2Decompressor()
2842 # Bzip2 stream start with BZ, but we stripped it.
2843 # Bzip2 stream start with BZ, but we stripped it.
2843 # we put it back for good measure.
2844 # we put it back for good measure.
2844 d.decompress('BZ')
2845 d.decompress('BZ')
2845 return d
2846 return d
2846
2847
# Map compression type name -> function turning a file object into a
# stream of decompressed data.  None means "no compression" (identity).
decompressors = {None: lambda fh: fh,
                 # for BZ streams whose two-byte magic was already
                 # consumed by the bundle reader (see _bz2)
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2854
2855
# convenient shortcut
dst = debugstacktrace  # short alias for interactive debugging sessions
@@ -1,78 +1,83
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 """This does HTTP GET requests given a host:port and path and returns
3 """This does HTTP GET requests given a host:port and path and returns
4 a subset of the headers plus the body of the result."""
4 a subset of the headers plus the body of the result."""
5
5
6 from __future__ import absolute_import, print_function
6 from __future__ import absolute_import, print_function
7
7
8 import httplib
9 import json
8 import json
10 import os
9 import os
11 import sys
10 import sys
12
11
12 from mercurial import (
13 util,
14 )
15
16 httplib = util.httplib
17
13 try:
18 try:
14 import msvcrt
19 import msvcrt
15 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
20 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
16 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
21 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
17 except ImportError:
22 except ImportError:
18 pass
23 pass
19
24
20 twice = False
25 twice = False
21 if '--twice' in sys.argv:
26 if '--twice' in sys.argv:
22 sys.argv.remove('--twice')
27 sys.argv.remove('--twice')
23 twice = True
28 twice = True
24 headeronly = False
29 headeronly = False
25 if '--headeronly' in sys.argv:
30 if '--headeronly' in sys.argv:
26 sys.argv.remove('--headeronly')
31 sys.argv.remove('--headeronly')
27 headeronly = True
32 headeronly = True
28 formatjson = False
33 formatjson = False
29 if '--json' in sys.argv:
34 if '--json' in sys.argv:
30 sys.argv.remove('--json')
35 sys.argv.remove('--json')
31 formatjson = True
36 formatjson = True
32
37
33 tag = None
38 tag = None
34 def request(host, path, show):
39 def request(host, path, show):
35 assert not path.startswith('/'), path
40 assert not path.startswith('/'), path
36 global tag
41 global tag
37 headers = {}
42 headers = {}
38 if tag:
43 if tag:
39 headers['If-None-Match'] = tag
44 headers['If-None-Match'] = tag
40
45
41 conn = httplib.HTTPConnection(host)
46 conn = httplib.HTTPConnection(host)
42 conn.request("GET", '/' + path, None, headers)
47 conn.request("GET", '/' + path, None, headers)
43 response = conn.getresponse()
48 response = conn.getresponse()
44 print(response.status, response.reason)
49 print(response.status, response.reason)
45 if show[:1] == ['-']:
50 if show[:1] == ['-']:
46 show = sorted(h for h, v in response.getheaders()
51 show = sorted(h for h, v in response.getheaders()
47 if h.lower() not in show)
52 if h.lower() not in show)
48 for h in [h.lower() for h in show]:
53 for h in [h.lower() for h in show]:
49 if response.getheader(h, None) is not None:
54 if response.getheader(h, None) is not None:
50 print("%s: %s" % (h, response.getheader(h)))
55 print("%s: %s" % (h, response.getheader(h)))
51 if not headeronly:
56 if not headeronly:
52 print()
57 print()
53 data = response.read()
58 data = response.read()
54
59
55 # Pretty print JSON. This also has the beneficial side-effect
60 # Pretty print JSON. This also has the beneficial side-effect
56 # of verifying emitted JSON is well-formed.
61 # of verifying emitted JSON is well-formed.
57 if formatjson:
62 if formatjson:
58 # json.dumps() will print trailing newlines. Eliminate them
63 # json.dumps() will print trailing newlines. Eliminate them
59 # to make tests easier to write.
64 # to make tests easier to write.
60 data = json.loads(data)
65 data = json.loads(data)
61 lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
66 lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
62 for line in lines:
67 for line in lines:
63 print(line.rstrip())
68 print(line.rstrip())
64 else:
69 else:
65 sys.stdout.write(data)
70 sys.stdout.write(data)
66
71
67 if twice and response.getheader('ETag', None):
72 if twice and response.getheader('ETag', None):
68 tag = response.getheader('ETag')
73 tag = response.getheader('ETag')
69
74
70 return response.status
75 return response.status
71
76
72 status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
77 status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
73 if twice:
78 if twice:
74 status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
79 status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
75
80
76 if 200 <= status <= 305:
81 if 200 <= status <= 305:
77 sys.exit(0)
82 sys.exit(0)
78 sys.exit(1)
83 sys.exit(1)
@@ -1,150 +1,150
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ cd "$TESTDIR"/..
4 $ cd "$TESTDIR"/..
5
5
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 hgext/highlight/__init__.py not using absolute_import
11 hgext/highlight/__init__.py not using absolute_import
12 hgext/highlight/highlight.py not using absolute_import
12 hgext/highlight/highlight.py not using absolute_import
13 hgext/share.py not using absolute_import
13 hgext/share.py not using absolute_import
14 hgext/win32text.py not using absolute_import
14 hgext/win32text.py not using absolute_import
15 i18n/check-translation.py not using absolute_import
15 i18n/check-translation.py not using absolute_import
16 i18n/polib.py not using absolute_import
16 i18n/polib.py not using absolute_import
17 setup.py not using absolute_import
17 setup.py not using absolute_import
18 tests/heredoctest.py requires print_function
18 tests/heredoctest.py requires print_function
19 tests/md5sum.py not using absolute_import
19 tests/md5sum.py not using absolute_import
20 tests/readlink.py not using absolute_import
20 tests/readlink.py not using absolute_import
21 tests/run-tests.py not using absolute_import
21 tests/run-tests.py not using absolute_import
22 tests/test-demandimport.py not using absolute_import
22 tests/test-demandimport.py not using absolute_import
23
23
24 #if py3exe
24 #if py3exe
25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
29 hgext/bugzilla.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
29 hgext/bugzilla.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
31 hgext/chgserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
31 hgext/chgserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
64 hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
64 hgext/largefiles/proto.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at httppeer.py:*) (glob)
65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
91 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
91 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
92 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
92 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
97 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
97 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
98 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
98 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
99 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
99 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
105 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
105 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
106 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
106 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
107 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
107 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
108 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
108 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
111 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
111 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
112 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
112 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
113 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
113 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
115 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
116 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
116 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
117 mercurial/httppeer.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
117 mercurial/httppeer.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
118 mercurial/keepalive.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
118 mercurial/keepalive.py: error importing module: <ImportError> No module named 'thread' (line *) (glob)
119 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
119 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
120 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
120 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
121 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
121 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
122 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
122 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
125 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
125 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
126 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
126 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
127 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
127 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
128 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
128 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
129 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
129 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
130 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
130 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
131 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
131 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
132 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
132 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
133 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
133 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
134 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
134 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
135 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
135 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
144 mercurial/url.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
144 mercurial/url.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
145 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
145 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
146 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
146 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
147 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
147 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
148 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
148 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
149
149
150 #endif
150 #endif
General Comments 0
You need to be logged in to leave comments. Login now