##// END OF EJS Templates
py3: conditionalize BaseHTTPServer, SimpleHTTPServer and CGIHTTPServer import...
Pulkit Goyal -
r29566:075146e8 default
parent child Browse files
Show More
@@ -1,652 +1,653 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # check-code - a style and portability checker for Mercurial
3 # check-code - a style and portability checker for Mercurial
4 #
4 #
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """style and portability checker for Mercurial
10 """style and portability checker for Mercurial
11
11
12 when a rule triggers wrong, do one of the following (prefer one from top):
12 when a rule triggers wrong, do one of the following (prefer one from top):
13 * do the work-around the rule suggests
13 * do the work-around the rule suggests
14 * doublecheck that it is a false match
14 * doublecheck that it is a false match
15 * improve the rule pattern
15 * improve the rule pattern
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
17 (you can append a short comment and match this, like: #re-raises)
17 (you can append a short comment and match this, like: #re-raises)
18 * change the pattern to a warning and list the exception in test-check-code-hg
18 * change the pattern to a warning and list the exception in test-check-code-hg
19 * ONLY use no--check-code for skipping entire files from external sources
19 * ONLY use no--check-code for skipping entire files from external sources
20 """
20 """
21
21
22 from __future__ import absolute_import, print_function
22 from __future__ import absolute_import, print_function
23 import glob
23 import glob
24 import keyword
24 import keyword
25 import optparse
25 import optparse
26 import os
26 import os
27 import re
27 import re
28 import sys
28 import sys
29 if sys.version_info[0] < 3:
29 if sys.version_info[0] < 3:
30 opentext = open
30 opentext = open
31 else:
31 else:
32 def opentext(f):
32 def opentext(f):
33 return open(f, encoding='ascii')
33 return open(f, encoding='ascii')
34 try:
34 try:
35 xrange
35 xrange
36 except NameError:
36 except NameError:
37 xrange = range
37 xrange = range
38 try:
38 try:
39 import re2
39 import re2
40 except ImportError:
40 except ImportError:
41 re2 = None
41 re2 = None
42
42
43 def compilere(pat, multiline=False):
43 def compilere(pat, multiline=False):
44 if multiline:
44 if multiline:
45 pat = '(?m)' + pat
45 pat = '(?m)' + pat
46 if re2:
46 if re2:
47 try:
47 try:
48 return re2.compile(pat)
48 return re2.compile(pat)
49 except re2.error:
49 except re2.error:
50 pass
50 pass
51 return re.compile(pat)
51 return re.compile(pat)
52
52
53 # check "rules depending on implementation of repquote()" in each
53 # check "rules depending on implementation of repquote()" in each
54 # patterns (especially pypats), before changing around repquote()
54 # patterns (especially pypats), before changing around repquote()
55 _repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
55 _repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
56 '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
56 '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
57 def _repquoteencodechr(i):
57 def _repquoteencodechr(i):
58 if i > 255:
58 if i > 255:
59 return 'u'
59 return 'u'
60 c = chr(i)
60 c = chr(i)
61 if c in _repquotefixedmap:
61 if c in _repquotefixedmap:
62 return _repquotefixedmap[c]
62 return _repquotefixedmap[c]
63 if c.isalpha():
63 if c.isalpha():
64 return 'x'
64 return 'x'
65 if c.isdigit():
65 if c.isdigit():
66 return 'n'
66 return 'n'
67 return 'o'
67 return 'o'
68 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
68 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
69
69
70 def repquote(m):
70 def repquote(m):
71 t = m.group('text')
71 t = m.group('text')
72 t = t.translate(_repquotett)
72 t = t.translate(_repquotett)
73 return m.group('quote') + t + m.group('quote')
73 return m.group('quote') + t + m.group('quote')
74
74
75 def reppython(m):
75 def reppython(m):
76 comment = m.group('comment')
76 comment = m.group('comment')
77 if comment:
77 if comment:
78 l = len(comment.rstrip())
78 l = len(comment.rstrip())
79 return "#" * l + comment[l:]
79 return "#" * l + comment[l:]
80 return repquote(m)
80 return repquote(m)
81
81
82 def repcomment(m):
82 def repcomment(m):
83 return m.group(1) + "#" * len(m.group(2))
83 return m.group(1) + "#" * len(m.group(2))
84
84
85 def repccomment(m):
85 def repccomment(m):
86 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
86 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
87 return m.group(1) + t + "*/"
87 return m.group(1) + t + "*/"
88
88
89 def repcallspaces(m):
89 def repcallspaces(m):
90 t = re.sub(r"\n\s+", "\n", m.group(2))
90 t = re.sub(r"\n\s+", "\n", m.group(2))
91 return m.group(1) + t
91 return m.group(1) + t
92
92
93 def repinclude(m):
93 def repinclude(m):
94 return m.group(1) + "<foo>"
94 return m.group(1) + "<foo>"
95
95
96 def rephere(m):
96 def rephere(m):
97 t = re.sub(r"\S", "x", m.group(2))
97 t = re.sub(r"\S", "x", m.group(2))
98 return m.group(1) + t
98 return m.group(1) + t
99
99
100
100
101 testpats = [
101 testpats = [
102 [
102 [
103 (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
103 (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
104 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
104 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
105 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
105 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
106 (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
106 (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
107 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
107 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
108 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
108 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
109 (r'echo -n', "don't use 'echo -n', use printf"),
109 (r'echo -n', "don't use 'echo -n', use printf"),
110 (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
110 (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
111 (r'head -c', "don't use 'head -c', use 'dd'"),
111 (r'head -c', "don't use 'head -c', use 'dd'"),
112 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
112 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
113 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
113 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
114 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
114 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
115 (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
115 (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
116 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
116 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
117 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
117 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
118 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
118 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
119 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
119 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
120 "use egrep for extended grep syntax"),
120 "use egrep for extended grep syntax"),
121 (r'/bin/', "don't use explicit paths for tools"),
121 (r'/bin/', "don't use explicit paths for tools"),
122 (r'[^\n]\Z', "no trailing newline"),
122 (r'[^\n]\Z', "no trailing newline"),
123 (r'export .*=', "don't export and assign at once"),
123 (r'export .*=', "don't export and assign at once"),
124 (r'^source\b', "don't use 'source', use '.'"),
124 (r'^source\b', "don't use 'source', use '.'"),
125 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
125 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
126 (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
126 (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
127 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
127 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
128 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
128 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
129 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
129 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
130 (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
130 (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
131 (r'^alias\b.*=', "don't use alias, use a function"),
131 (r'^alias\b.*=', "don't use alias, use a function"),
132 (r'if\s*!', "don't use '!' to negate exit status"),
132 (r'if\s*!', "don't use '!' to negate exit status"),
133 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
133 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
134 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
134 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
135 (r'^( *)\t', "don't use tabs to indent"),
135 (r'^( *)\t', "don't use tabs to indent"),
136 (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
136 (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
137 "put a backslash-escaped newline after sed 'i' command"),
137 "put a backslash-escaped newline after sed 'i' command"),
138 (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
138 (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
139 (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
139 (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
140 (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
140 (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
141 (r'\butil\.Abort\b', "directly use error.Abort"),
141 (r'\butil\.Abort\b', "directly use error.Abort"),
142 (r'\|&', "don't use |&, use 2>&1"),
142 (r'\|&', "don't use |&, use 2>&1"),
143 (r'\w = +\w', "only one space after = allowed"),
143 (r'\w = +\w', "only one space after = allowed"),
144 (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
144 (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
145 (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
145 (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
146 ],
146 ],
147 # warnings
147 # warnings
148 [
148 [
149 (r'^function', "don't use 'function', use old style"),
149 (r'^function', "don't use 'function', use old style"),
150 (r'^diff.*-\w*N', "don't use 'diff -N'"),
150 (r'^diff.*-\w*N', "don't use 'diff -N'"),
151 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
151 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
152 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
152 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
153 (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
153 (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
154 ]
154 ]
155 ]
155 ]
156
156
157 testfilters = [
157 testfilters = [
158 (r"( *)(#([^\n]*\S)?)", repcomment),
158 (r"( *)(#([^\n]*\S)?)", repcomment),
159 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
159 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
160 ]
160 ]
161
161
162 winglobmsg = "use (glob) to match Windows paths too"
162 winglobmsg = "use (glob) to match Windows paths too"
163 uprefix = r"^ \$ "
163 uprefix = r"^ \$ "
164 utestpats = [
164 utestpats = [
165 [
165 [
166 (r'^(\S.*|| [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
166 (r'^(\S.*|| [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
167 (uprefix + r'.*\|\s*sed[^|>\n]*\n',
167 (uprefix + r'.*\|\s*sed[^|>\n]*\n',
168 "use regex test output patterns instead of sed"),
168 "use regex test output patterns instead of sed"),
169 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
169 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
170 (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
170 (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
171 (uprefix + r'.*\|\| echo.*(fail|error)',
171 (uprefix + r'.*\|\| echo.*(fail|error)',
172 "explicit exit code checks unnecessary"),
172 "explicit exit code checks unnecessary"),
173 (uprefix + r'set -e', "don't use set -e"),
173 (uprefix + r'set -e', "don't use set -e"),
174 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
174 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
175 (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
175 (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
176 "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
176 "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
177 '# no-msys'), # in test-pull.t which is skipped on windows
177 '# no-msys'), # in test-pull.t which is skipped on windows
178 (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
178 (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
179 (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
179 (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
180 winglobmsg),
180 winglobmsg),
181 (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
181 (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
182 '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
182 '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
183 (r'^ reverting (?!subrepo ).*/.*[^)]$', winglobmsg),
183 (r'^ reverting (?!subrepo ).*/.*[^)]$', winglobmsg),
184 (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
184 (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
185 (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
185 (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
186 (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
186 (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
187 (r'^ moving \S+/.*[^)]$', winglobmsg),
187 (r'^ moving \S+/.*[^)]$', winglobmsg),
188 (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
188 (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
189 (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
189 (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
190 (r'^ .*file://\$TESTTMP',
190 (r'^ .*file://\$TESTTMP',
191 'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
191 'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
192 (r'^ [^$>].*27\.0\.0\.1.*[^)]$',
192 (r'^ [^$>].*27\.0\.0\.1.*[^)]$',
193 'use (glob) to match localhost IP on hosts without 127.0.0.1 too'),
193 'use (glob) to match localhost IP on hosts without 127.0.0.1 too'),
194 (r'^ (cat|find): .*: No such file or directory',
194 (r'^ (cat|find): .*: No such file or directory',
195 'use test -f to test for file existence'),
195 'use test -f to test for file existence'),
196 (r'^ diff -[^ -]*p',
196 (r'^ diff -[^ -]*p',
197 "don't use (external) diff with -p for portability"),
197 "don't use (external) diff with -p for portability"),
198 (r'^ [-+][-+][-+] .* [-+]0000 \(glob\)',
198 (r'^ [-+][-+][-+] .* [-+]0000 \(glob\)',
199 "glob timezone field in diff output for portability"),
199 "glob timezone field in diff output for portability"),
200 (r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
200 (r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
201 "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"),
201 "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"),
202 (r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
202 (r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
203 "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"),
203 "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"),
204 (r'^ @@ -[0-9]+ [+][0-9]+ @@',
204 (r'^ @@ -[0-9]+ [+][0-9]+ @@',
205 "use '@@ -N* +N* @@ (glob)' style chunk header for portability"),
205 "use '@@ -N* +N* @@ (glob)' style chunk header for portability"),
206 (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
206 (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
207 r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
207 r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
208 "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"),
208 "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"),
209 ],
209 ],
210 # warnings
210 # warnings
211 [
211 [
212 (r'^ (?!.*127\.0\.0\.1)[^*?/\n]* \(glob\)$',
212 (r'^ (?!.*127\.0\.0\.1)[^*?/\n]* \(glob\)$',
213 "glob match with no glob string (?, *, /, and 127.0.0.1)"),
213 "glob match with no glob string (?, *, /, and 127.0.0.1)"),
214 ]
214 ]
215 ]
215 ]
216
216
217 for i in [0, 1]:
217 for i in [0, 1]:
218 for tp in testpats[i]:
218 for tp in testpats[i]:
219 p = tp[0]
219 p = tp[0]
220 m = tp[1]
220 m = tp[1]
221 if p.startswith(r'^'):
221 if p.startswith(r'^'):
222 p = r"^ [$>] (%s)" % p[1:]
222 p = r"^ [$>] (%s)" % p[1:]
223 else:
223 else:
224 p = r"^ [$>] .*(%s)" % p
224 p = r"^ [$>] .*(%s)" % p
225 utestpats[i].append((p, m) + tp[2:])
225 utestpats[i].append((p, m) + tp[2:])
226
226
227 utestfilters = [
227 utestfilters = [
228 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
228 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
229 (r"( +)(#([^\n]*\S)?)", repcomment),
229 (r"( +)(#([^\n]*\S)?)", repcomment),
230 ]
230 ]
231
231
232 pypats = [
232 pypats = [
233 [
233 [
234 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
234 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
235 "tuple parameter unpacking not available in Python 3+"),
235 "tuple parameter unpacking not available in Python 3+"),
236 (r'lambda\s*\(.*,.*\)',
236 (r'lambda\s*\(.*,.*\)',
237 "tuple parameter unpacking not available in Python 3+"),
237 "tuple parameter unpacking not available in Python 3+"),
238 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
238 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
239 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
239 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
240 (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
240 (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
241 'dict-from-generator'),
241 'dict-from-generator'),
242 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
242 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
243 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
243 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
244 (r'^\s*\t', "don't use tabs"),
244 (r'^\s*\t', "don't use tabs"),
245 (r'\S;\s*\n', "semicolon"),
245 (r'\S;\s*\n', "semicolon"),
246 (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
246 (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
247 (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
247 (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
248 (r'(\w|\)),\w', "missing whitespace after ,"),
248 (r'(\w|\)),\w', "missing whitespace after ,"),
249 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
249 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
250 (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
250 (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
251 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
251 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
252 (r'.{81}', "line too long"),
252 (r'.{81}', "line too long"),
253 (r'[^\n]\Z', "no trailing newline"),
253 (r'[^\n]\Z', "no trailing newline"),
254 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
254 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
255 # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
255 # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
256 # "don't use underbars in identifiers"),
256 # "don't use underbars in identifiers"),
257 (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
257 (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
258 "don't use camelcase in identifiers"),
258 "don't use camelcase in identifiers"),
259 (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
259 (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
260 "linebreak after :"),
260 "linebreak after :"),
261 (r'class\s[^( \n]+:', "old-style class, use class foo(object)",
261 (r'class\s[^( \n]+:', "old-style class, use class foo(object)",
262 r'#.*old-style'),
262 r'#.*old-style'),
263 (r'class\s[^( \n]+\(\):',
263 (r'class\s[^( \n]+\(\):',
264 "class foo() creates old style object, use class foo(object)",
264 "class foo() creates old style object, use class foo(object)",
265 r'#.*old-style'),
265 r'#.*old-style'),
266 (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist
266 (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist
267 if k not in ('print', 'exec')),
267 if k not in ('print', 'exec')),
268 "Python keyword is not a function"),
268 "Python keyword is not a function"),
269 (r',]', "unneeded trailing ',' in list"),
269 (r',]', "unneeded trailing ',' in list"),
270 # (r'class\s[A-Z][^\(]*\((?!Exception)',
270 # (r'class\s[A-Z][^\(]*\((?!Exception)',
271 # "don't capitalize non-exception classes"),
271 # "don't capitalize non-exception classes"),
272 # (r'in range\(', "use xrange"),
272 # (r'in range\(', "use xrange"),
273 # (r'^\s*print\s+', "avoid using print in core and extensions"),
273 # (r'^\s*print\s+', "avoid using print in core and extensions"),
274 (r'[\x80-\xff]', "non-ASCII character literal"),
274 (r'[\x80-\xff]', "non-ASCII character literal"),
275 (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
275 (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
276 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
276 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
277 "gratuitous whitespace after Python keyword"),
277 "gratuitous whitespace after Python keyword"),
278 (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
278 (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
279 # (r'\s\s=', "gratuitous whitespace before ="),
279 # (r'\s\s=', "gratuitous whitespace before ="),
280 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
280 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
281 "missing whitespace around operator"),
281 "missing whitespace around operator"),
282 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
282 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
283 "missing whitespace around operator"),
283 "missing whitespace around operator"),
284 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
284 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
285 "missing whitespace around operator"),
285 "missing whitespace around operator"),
286 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
286 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
287 "wrong whitespace around ="),
287 "wrong whitespace around ="),
288 (r'\([^()]*( =[^=]|[^<>!=]= )',
288 (r'\([^()]*( =[^=]|[^<>!=]= )',
289 "no whitespace around = for named parameters"),
289 "no whitespace around = for named parameters"),
290 (r'raise Exception', "don't raise generic exceptions"),
290 (r'raise Exception', "don't raise generic exceptions"),
291 (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
291 (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
292 "don't use old-style two-argument raise, use Exception(message)"),
292 "don't use old-style two-argument raise, use Exception(message)"),
293 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
293 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
294 (r' [=!]=\s+(True|False|None)',
294 (r' [=!]=\s+(True|False|None)',
295 "comparison with singleton, use 'is' or 'is not' instead"),
295 "comparison with singleton, use 'is' or 'is not' instead"),
296 (r'^\s*(while|if) [01]:',
296 (r'^\s*(while|if) [01]:',
297 "use True/False for constant Boolean expression"),
297 "use True/False for constant Boolean expression"),
298 (r'(?:(?<!def)\s+|\()hasattr',
298 (r'(?:(?<!def)\s+|\()hasattr',
299 'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
299 'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
300 (r'opener\([^)]*\).read\(',
300 (r'opener\([^)]*\).read\(',
301 "use opener.read() instead"),
301 "use opener.read() instead"),
302 (r'opener\([^)]*\).write\(',
302 (r'opener\([^)]*\).write\(',
303 "use opener.write() instead"),
303 "use opener.write() instead"),
304 (r'[\s\(](open|file)\([^)]*\)\.read\(',
304 (r'[\s\(](open|file)\([^)]*\)\.read\(',
305 "use util.readfile() instead"),
305 "use util.readfile() instead"),
306 (r'[\s\(](open|file)\([^)]*\)\.write\(',
306 (r'[\s\(](open|file)\([^)]*\)\.write\(',
307 "use util.writefile() instead"),
307 "use util.writefile() instead"),
308 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
308 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
309 "always assign an opened file to a variable, and close it afterwards"),
309 "always assign an opened file to a variable, and close it afterwards"),
310 (r'[\s\(](open|file)\([^)]*\)\.',
310 (r'[\s\(](open|file)\([^)]*\)\.',
311 "always assign an opened file to a variable, and close it afterwards"),
311 "always assign an opened file to a variable, and close it afterwards"),
312 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
312 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
313 (r'\.debug\(\_', "don't mark debug messages for translation"),
313 (r'\.debug\(\_', "don't mark debug messages for translation"),
314 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
314 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
315 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
315 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
316 (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
316 (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
317 'legacy exception syntax; use "as" instead of ","'),
317 'legacy exception syntax; use "as" instead of ","'),
318 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
318 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
319 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
319 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
320 (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
320 (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
321 (r'os\.path\.join\(.*, *(""|\'\')\)',
321 (r'os\.path\.join\(.*, *(""|\'\')\)',
322 "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
322 "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
323 (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
323 (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
324 # XXX only catch mutable arguments on the first line of the definition
324 # XXX only catch mutable arguments on the first line of the definition
325 (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
325 (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
326 (r'\butil\.Abort\b', "directly use error.Abort"),
326 (r'\butil\.Abort\b', "directly use error.Abort"),
327 (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
327 (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
328 (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
328 (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
329 (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
329 (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
330 (r'^import SocketServer', "don't use SockerServer, use util.socketserver"),
330 (r'^import SocketServer', "don't use SockerServer, use util.socketserver"),
331 (r'^import urlparse', "don't use urlparse, use util.urlparse"),
331 (r'^import urlparse', "don't use urlparse, use util.urlparse"),
332 (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
332 (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
333 (r'^import cPickle', "don't use cPickle, use util.pickle"),
333 (r'^import cPickle', "don't use cPickle, use util.pickle"),
334 (r'^import pickle', "don't use pickle, use util.pickle"),
334 (r'^import pickle', "don't use pickle, use util.pickle"),
335 (r'^import httplib', "don't use httplib, use util.httplib"),
335 (r'^import httplib', "don't use httplib, use util.httplib"),
336 (r'^import BaseHTTPServer', "use util.httpserver instead"),
336 (r'\.next\(\)', "don't use .next(), use next(...)"),
337 (r'\.next\(\)', "don't use .next(), use next(...)"),
337
338
338 # rules depending on implementation of repquote()
339 # rules depending on implementation of repquote()
339 (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
340 (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
340 'string join across lines with no space'),
341 'string join across lines with no space'),
341 (r'''(?x)ui\.(status|progress|write|note|warn)\(
342 (r'''(?x)ui\.(status|progress|write|note|warn)\(
342 [ \t\n#]*
343 [ \t\n#]*
343 (?# any strings/comments might precede a string, which
344 (?# any strings/comments might precede a string, which
344 # contains translatable message)
345 # contains translatable message)
345 ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
346 ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
346 (?# sequence consisting of below might precede translatable message
347 (?# sequence consisting of below might precede translatable message
347 # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
348 # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
348 # - escaped character: "\\", "\n", "\0" ...
349 # - escaped character: "\\", "\n", "\0" ...
349 # - character other than '%', 'b' as '\', and 'x' as alphabet)
350 # - character other than '%', 'b' as '\', and 'x' as alphabet)
350 (['"]|\'\'\'|""")
351 (['"]|\'\'\'|""")
351 ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
352 ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
352 (?# this regexp can't use [^...] style,
353 (?# this regexp can't use [^...] style,
353 # because _preparepats forcibly adds "\n" into [^...],
354 # because _preparepats forcibly adds "\n" into [^...],
354 # even though this regexp wants match it against "\n")''',
355 # even though this regexp wants match it against "\n")''',
355 "missing _() in ui message (use () to hide false-positives)"),
356 "missing _() in ui message (use () to hide false-positives)"),
356 ],
357 ],
357 # warnings
358 # warnings
358 [
359 [
359 # rules depending on implementation of repquote()
360 # rules depending on implementation of repquote()
360 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
361 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
361 ]
362 ]
362 ]
363 ]
363
364
364 pyfilters = [
365 pyfilters = [
365 (r"""(?msx)(?P<comment>\#.*?$)|
366 (r"""(?msx)(?P<comment>\#.*?$)|
366 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
367 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
367 (?P<text>(([^\\]|\\.)*?))
368 (?P<text>(([^\\]|\\.)*?))
368 (?P=quote))""", reppython),
369 (?P=quote))""", reppython),
369 ]
370 ]
370
371
371 txtfilters = []
372 txtfilters = []
372
373
373 txtpats = [
374 txtpats = [
374 [
375 [
375 ('\s$', 'trailing whitespace'),
376 ('\s$', 'trailing whitespace'),
376 ('.. note::[ \n][^\n]', 'add two newlines after note::')
377 ('.. note::[ \n][^\n]', 'add two newlines after note::')
377 ],
378 ],
378 []
379 []
379 ]
380 ]
380
381
381 cpats = [
382 cpats = [
382 [
383 [
383 (r'//', "don't use //-style comments"),
384 (r'//', "don't use //-style comments"),
384 (r'^ ', "don't use spaces to indent"),
385 (r'^ ', "don't use spaces to indent"),
385 (r'\S\t', "don't use tabs except for indent"),
386 (r'\S\t', "don't use tabs except for indent"),
386 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
387 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
387 (r'.{81}', "line too long"),
388 (r'.{81}', "line too long"),
388 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
389 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
389 (r'return\(', "return is not a function"),
390 (r'return\(', "return is not a function"),
390 (r' ;', "no space before ;"),
391 (r' ;', "no space before ;"),
391 (r'[^;] \)', "no space before )"),
392 (r'[^;] \)', "no space before )"),
392 (r'[)][{]', "space between ) and {"),
393 (r'[)][{]', "space between ) and {"),
393 (r'\w+\* \w+', "use int *foo, not int* foo"),
394 (r'\w+\* \w+', "use int *foo, not int* foo"),
394 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
395 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
395 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
396 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
396 (r'\w,\w', "missing whitespace after ,"),
397 (r'\w,\w', "missing whitespace after ,"),
397 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
398 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
398 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
399 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
399 (r'^#\s+\w', "use #foo, not # foo"),
400 (r'^#\s+\w', "use #foo, not # foo"),
400 (r'[^\n]\Z', "no trailing newline"),
401 (r'[^\n]\Z', "no trailing newline"),
401 (r'^\s*#import\b', "use only #include in standard C code"),
402 (r'^\s*#import\b', "use only #include in standard C code"),
402 (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
403 (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
403 (r'strcat\(', "don't use strcat"),
404 (r'strcat\(', "don't use strcat"),
404
405
405 # rules depending on implementation of repquote()
406 # rules depending on implementation of repquote()
406 ],
407 ],
407 # warnings
408 # warnings
408 [
409 [
409 # rules depending on implementation of repquote()
410 # rules depending on implementation of repquote()
410 ]
411 ]
411 ]
412 ]
412
413
413 cfilters = [
414 cfilters = [
414 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
415 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
415 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
416 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
416 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
417 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
417 (r'(\()([^)]+\))', repcallspaces),
418 (r'(\()([^)]+\))', repcallspaces),
418 ]
419 ]
419
420
420 inutilpats = [
421 inutilpats = [
421 [
422 [
422 (r'\bui\.', "don't use ui in util"),
423 (r'\bui\.', "don't use ui in util"),
423 ],
424 ],
424 # warnings
425 # warnings
425 []
426 []
426 ]
427 ]
427
428
428 inrevlogpats = [
429 inrevlogpats = [
429 [
430 [
430 (r'\brepo\.', "don't use repo in revlog"),
431 (r'\brepo\.', "don't use repo in revlog"),
431 ],
432 ],
432 # warnings
433 # warnings
433 []
434 []
434 ]
435 ]
435
436
436 webtemplatefilters = []
437 webtemplatefilters = []
437
438
438 webtemplatepats = [
439 webtemplatepats = [
439 [],
440 [],
440 [
441 [
441 (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
442 (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
442 'follow desc keyword with either firstline or websub'),
443 'follow desc keyword with either firstline or websub'),
443 ]
444 ]
444 ]
445 ]
445
446
446 checks = [
447 checks = [
447 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
448 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
448 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
449 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
449 ('c', r'.*\.[ch]$', '', cfilters, cpats),
450 ('c', r'.*\.[ch]$', '', cfilters, cpats),
450 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
451 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
451 ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
452 ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
452 pyfilters, inrevlogpats),
453 pyfilters, inrevlogpats),
453 ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
454 ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
454 inutilpats),
455 inutilpats),
455 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
456 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
456 ('web template', r'mercurial/templates/.*\.tmpl', '',
457 ('web template', r'mercurial/templates/.*\.tmpl', '',
457 webtemplatefilters, webtemplatepats),
458 webtemplatefilters, webtemplatepats),
458 ]
459 ]
459
460
460 def _preparepats():
461 def _preparepats():
461 for c in checks:
462 for c in checks:
462 failandwarn = c[-1]
463 failandwarn = c[-1]
463 for pats in failandwarn:
464 for pats in failandwarn:
464 for i, pseq in enumerate(pats):
465 for i, pseq in enumerate(pats):
465 # fix-up regexes for multi-line searches
466 # fix-up regexes for multi-line searches
466 p = pseq[0]
467 p = pseq[0]
467 # \s doesn't match \n
468 # \s doesn't match \n
468 p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
469 p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
469 # [^...] doesn't match newline
470 # [^...] doesn't match newline
470 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
471 p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)
471
472
472 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
473 pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
473 filters = c[3]
474 filters = c[3]
474 for i, flt in enumerate(filters):
475 for i, flt in enumerate(filters):
475 filters[i] = re.compile(flt[0]), flt[1]
476 filters[i] = re.compile(flt[0]), flt[1]
476 _preparepats()
477 _preparepats()
477
478
478 class norepeatlogger(object):
479 class norepeatlogger(object):
479 def __init__(self):
480 def __init__(self):
480 self._lastseen = None
481 self._lastseen = None
481
482
482 def log(self, fname, lineno, line, msg, blame):
483 def log(self, fname, lineno, line, msg, blame):
483 """print error related a to given line of a given file.
484 """print error related a to given line of a given file.
484
485
485 The faulty line will also be printed but only once in the case
486 The faulty line will also be printed but only once in the case
486 of multiple errors.
487 of multiple errors.
487
488
488 :fname: filename
489 :fname: filename
489 :lineno: line number
490 :lineno: line number
490 :line: actual content of the line
491 :line: actual content of the line
491 :msg: error message
492 :msg: error message
492 """
493 """
493 msgid = fname, lineno, line
494 msgid = fname, lineno, line
494 if msgid != self._lastseen:
495 if msgid != self._lastseen:
495 if blame:
496 if blame:
496 print("%s:%d (%s):" % (fname, lineno, blame))
497 print("%s:%d (%s):" % (fname, lineno, blame))
497 else:
498 else:
498 print("%s:%d:" % (fname, lineno))
499 print("%s:%d:" % (fname, lineno))
499 print(" > %s" % line)
500 print(" > %s" % line)
500 self._lastseen = msgid
501 self._lastseen = msgid
501 print(" " + msg)
502 print(" " + msg)
502
503
503 _defaultlogger = norepeatlogger()
504 _defaultlogger = norepeatlogger()
504
505
505 def getblame(f):
506 def getblame(f):
506 lines = []
507 lines = []
507 for l in os.popen('hg annotate -un %s' % f):
508 for l in os.popen('hg annotate -un %s' % f):
508 start, line = l.split(':', 1)
509 start, line = l.split(':', 1)
509 user, rev = start.split()
510 user, rev = start.split()
510 lines.append((line[1:-1], user, rev))
511 lines.append((line[1:-1], user, rev))
511 return lines
512 return lines
512
513
513 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
514 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
514 blame=False, debug=False, lineno=True):
515 blame=False, debug=False, lineno=True):
515 """checks style and portability of a given file
516 """checks style and portability of a given file
516
517
517 :f: filepath
518 :f: filepath
518 :logfunc: function used to report error
519 :logfunc: function used to report error
519 logfunc(filename, linenumber, linecontent, errormessage)
520 logfunc(filename, linenumber, linecontent, errormessage)
520 :maxerr: number of error to display before aborting.
521 :maxerr: number of error to display before aborting.
521 Set to false (default) to report all errors
522 Set to false (default) to report all errors
522
523
523 return True if no error is found, False otherwise.
524 return True if no error is found, False otherwise.
524 """
525 """
525 blamecache = None
526 blamecache = None
526 result = True
527 result = True
527
528
528 try:
529 try:
529 with opentext(f) as fp:
530 with opentext(f) as fp:
530 try:
531 try:
531 pre = post = fp.read()
532 pre = post = fp.read()
532 except UnicodeDecodeError as e:
533 except UnicodeDecodeError as e:
533 print("%s while reading %s" % (e, f))
534 print("%s while reading %s" % (e, f))
534 return result
535 return result
535 except IOError as e:
536 except IOError as e:
536 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
537 print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
537 return result
538 return result
538
539
539 for name, match, magic, filters, pats in checks:
540 for name, match, magic, filters, pats in checks:
540 if debug:
541 if debug:
541 print(name, f)
542 print(name, f)
542 fc = 0
543 fc = 0
543 if not (re.match(match, f) or (magic and re.search(magic, pre))):
544 if not (re.match(match, f) or (magic and re.search(magic, pre))):
544 if debug:
545 if debug:
545 print("Skipping %s for %s it doesn't match %s" % (
546 print("Skipping %s for %s it doesn't match %s" % (
546 name, match, f))
547 name, match, f))
547 continue
548 continue
548 if "no-" "check-code" in pre:
549 if "no-" "check-code" in pre:
549 # If you're looking at this line, it's because a file has:
550 # If you're looking at this line, it's because a file has:
550 # no- check- code
551 # no- check- code
551 # but the reason to output skipping is to make life for
552 # but the reason to output skipping is to make life for
552 # tests easier. So, instead of writing it with a normal
553 # tests easier. So, instead of writing it with a normal
553 # spelling, we write it with the expected spelling from
554 # spelling, we write it with the expected spelling from
554 # tests/test-check-code.t
555 # tests/test-check-code.t
555 print("Skipping %s it has no-che?k-code (glob)" % f)
556 print("Skipping %s it has no-che?k-code (glob)" % f)
556 return "Skip" # skip checking this file
557 return "Skip" # skip checking this file
557 for p, r in filters:
558 for p, r in filters:
558 post = re.sub(p, r, post)
559 post = re.sub(p, r, post)
559 nerrs = len(pats[0]) # nerr elements are errors
560 nerrs = len(pats[0]) # nerr elements are errors
560 if warnings:
561 if warnings:
561 pats = pats[0] + pats[1]
562 pats = pats[0] + pats[1]
562 else:
563 else:
563 pats = pats[0]
564 pats = pats[0]
564 # print post # uncomment to show filtered version
565 # print post # uncomment to show filtered version
565
566
566 if debug:
567 if debug:
567 print("Checking %s for %s" % (name, f))
568 print("Checking %s for %s" % (name, f))
568
569
569 prelines = None
570 prelines = None
570 errors = []
571 errors = []
571 for i, pat in enumerate(pats):
572 for i, pat in enumerate(pats):
572 if len(pat) == 3:
573 if len(pat) == 3:
573 p, msg, ignore = pat
574 p, msg, ignore = pat
574 else:
575 else:
575 p, msg = pat
576 p, msg = pat
576 ignore = None
577 ignore = None
577 if i >= nerrs:
578 if i >= nerrs:
578 msg = "warning: " + msg
579 msg = "warning: " + msg
579
580
580 pos = 0
581 pos = 0
581 n = 0
582 n = 0
582 for m in p.finditer(post):
583 for m in p.finditer(post):
583 if prelines is None:
584 if prelines is None:
584 prelines = pre.splitlines()
585 prelines = pre.splitlines()
585 postlines = post.splitlines(True)
586 postlines = post.splitlines(True)
586
587
587 start = m.start()
588 start = m.start()
588 while n < len(postlines):
589 while n < len(postlines):
589 step = len(postlines[n])
590 step = len(postlines[n])
590 if pos + step > start:
591 if pos + step > start:
591 break
592 break
592 pos += step
593 pos += step
593 n += 1
594 n += 1
594 l = prelines[n]
595 l = prelines[n]
595
596
596 if ignore and re.search(ignore, l, re.MULTILINE):
597 if ignore and re.search(ignore, l, re.MULTILINE):
597 if debug:
598 if debug:
598 print("Skipping %s for %s:%s (ignore pattern)" % (
599 print("Skipping %s for %s:%s (ignore pattern)" % (
599 name, f, n))
600 name, f, n))
600 continue
601 continue
601 bd = ""
602 bd = ""
602 if blame:
603 if blame:
603 bd = 'working directory'
604 bd = 'working directory'
604 if not blamecache:
605 if not blamecache:
605 blamecache = getblame(f)
606 blamecache = getblame(f)
606 if n < len(blamecache):
607 if n < len(blamecache):
607 bl, bu, br = blamecache[n]
608 bl, bu, br = blamecache[n]
608 if bl == l:
609 if bl == l:
609 bd = '%s@%s' % (bu, br)
610 bd = '%s@%s' % (bu, br)
610
611
611 errors.append((f, lineno and n + 1, l, msg, bd))
612 errors.append((f, lineno and n + 1, l, msg, bd))
612 result = False
613 result = False
613
614
614 errors.sort()
615 errors.sort()
615 for e in errors:
616 for e in errors:
616 logfunc(*e)
617 logfunc(*e)
617 fc += 1
618 fc += 1
618 if maxerr and fc >= maxerr:
619 if maxerr and fc >= maxerr:
619 print(" (too many errors, giving up)")
620 print(" (too many errors, giving up)")
620 break
621 break
621
622
622 return result
623 return result
623
624
624 if __name__ == "__main__":
625 if __name__ == "__main__":
625 parser = optparse.OptionParser("%prog [options] [files]")
626 parser = optparse.OptionParser("%prog [options] [files]")
626 parser.add_option("-w", "--warnings", action="store_true",
627 parser.add_option("-w", "--warnings", action="store_true",
627 help="include warning-level checks")
628 help="include warning-level checks")
628 parser.add_option("-p", "--per-file", type="int",
629 parser.add_option("-p", "--per-file", type="int",
629 help="max warnings per file")
630 help="max warnings per file")
630 parser.add_option("-b", "--blame", action="store_true",
631 parser.add_option("-b", "--blame", action="store_true",
631 help="use annotate to generate blame info")
632 help="use annotate to generate blame info")
632 parser.add_option("", "--debug", action="store_true",
633 parser.add_option("", "--debug", action="store_true",
633 help="show debug information")
634 help="show debug information")
634 parser.add_option("", "--nolineno", action="store_false",
635 parser.add_option("", "--nolineno", action="store_false",
635 dest='lineno', help="don't show line numbers")
636 dest='lineno', help="don't show line numbers")
636
637
637 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
638 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
638 lineno=True)
639 lineno=True)
639 (options, args) = parser.parse_args()
640 (options, args) = parser.parse_args()
640
641
641 if len(args) == 0:
642 if len(args) == 0:
642 check = glob.glob("*")
643 check = glob.glob("*")
643 else:
644 else:
644 check = args
645 check = args
645
646
646 ret = 0
647 ret = 0
647 for f in check:
648 for f in check:
648 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
649 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
649 blame=options.blame, debug=options.debug,
650 blame=options.blame, debug=options.debug,
650 lineno=options.lineno):
651 lineno=options.lineno):
651 ret = 1
652 ret = 1
652 sys.exit(ret)
653 sys.exit(ret)
@@ -1,193 +1,196 b''
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import BaseHTTPServer
12 import errno
11 import errno
13 import mimetypes
12 import mimetypes
14 import os
13 import os
15
14
15 from .. import util
16
17 httpserver = util.httpserver
18
16 HTTP_OK = 200
19 HTTP_OK = 200
17 HTTP_NOT_MODIFIED = 304
20 HTTP_NOT_MODIFIED = 304
18 HTTP_BAD_REQUEST = 400
21 HTTP_BAD_REQUEST = 400
19 HTTP_UNAUTHORIZED = 401
22 HTTP_UNAUTHORIZED = 401
20 HTTP_FORBIDDEN = 403
23 HTTP_FORBIDDEN = 403
21 HTTP_NOT_FOUND = 404
24 HTTP_NOT_FOUND = 404
22 HTTP_METHOD_NOT_ALLOWED = 405
25 HTTP_METHOD_NOT_ALLOWED = 405
23 HTTP_SERVER_ERROR = 500
26 HTTP_SERVER_ERROR = 500
24
27
25
28
26 def ismember(ui, username, userlist):
29 def ismember(ui, username, userlist):
27 """Check if username is a member of userlist.
30 """Check if username is a member of userlist.
28
31
29 If userlist has a single '*' member, all users are considered members.
32 If userlist has a single '*' member, all users are considered members.
30 Can be overridden by extensions to provide more complex authorization
33 Can be overridden by extensions to provide more complex authorization
31 schemes.
34 schemes.
32 """
35 """
33 return userlist == ['*'] or username in userlist
36 return userlist == ['*'] or username in userlist
34
37
35 def checkauthz(hgweb, req, op):
38 def checkauthz(hgweb, req, op):
36 '''Check permission for operation based on request data (including
39 '''Check permission for operation based on request data (including
37 authentication info). Return if op allowed, else raise an ErrorResponse
40 authentication info). Return if op allowed, else raise an ErrorResponse
38 exception.'''
41 exception.'''
39
42
40 user = req.env.get('REMOTE_USER')
43 user = req.env.get('REMOTE_USER')
41
44
42 deny_read = hgweb.configlist('web', 'deny_read')
45 deny_read = hgweb.configlist('web', 'deny_read')
43 if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
46 if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
44 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
47 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
45
48
46 allow_read = hgweb.configlist('web', 'allow_read')
49 allow_read = hgweb.configlist('web', 'allow_read')
47 if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
50 if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
48 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
51 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
49
52
50 if op == 'pull' and not hgweb.allowpull:
53 if op == 'pull' and not hgweb.allowpull:
51 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
54 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
52 elif op == 'pull' or op is None: # op is None for interface requests
55 elif op == 'pull' or op is None: # op is None for interface requests
53 return
56 return
54
57
55 # enforce that you can only push using POST requests
58 # enforce that you can only push using POST requests
56 if req.env['REQUEST_METHOD'] != 'POST':
59 if req.env['REQUEST_METHOD'] != 'POST':
57 msg = 'push requires POST request'
60 msg = 'push requires POST request'
58 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
61 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
59
62
60 # require ssl by default for pushing, auth info cannot be sniffed
63 # require ssl by default for pushing, auth info cannot be sniffed
61 # and replayed
64 # and replayed
62 scheme = req.env.get('wsgi.url_scheme')
65 scheme = req.env.get('wsgi.url_scheme')
63 if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
66 if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
64 raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')
67 raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')
65
68
66 deny = hgweb.configlist('web', 'deny_push')
69 deny = hgweb.configlist('web', 'deny_push')
67 if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
70 if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
68 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
71 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
69
72
70 allow = hgweb.configlist('web', 'allow_push')
73 allow = hgweb.configlist('web', 'allow_push')
71 if not (allow and ismember(hgweb.repo.ui, user, allow)):
74 if not (allow and ismember(hgweb.repo.ui, user, allow)):
72 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
75 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
73
76
74 # Hooks for hgweb permission checks; extensions can add hooks here.
77 # Hooks for hgweb permission checks; extensions can add hooks here.
75 # Each hook is invoked like this: hook(hgweb, request, operation),
78 # Each hook is invoked like this: hook(hgweb, request, operation),
76 # where operation is either read, pull or push. Hooks should either
79 # where operation is either read, pull or push. Hooks should either
77 # raise an ErrorResponse exception, or just return.
80 # raise an ErrorResponse exception, or just return.
78 #
81 #
79 # It is possible to do both authentication and authorization through
82 # It is possible to do both authentication and authorization through
80 # this.
83 # this.
81 permhooks = [checkauthz]
84 permhooks = [checkauthz]
82
85
83
86
84 class ErrorResponse(Exception):
87 class ErrorResponse(Exception):
85 def __init__(self, code, message=None, headers=[]):
88 def __init__(self, code, message=None, headers=[]):
86 if message is None:
89 if message is None:
87 message = _statusmessage(code)
90 message = _statusmessage(code)
88 Exception.__init__(self, message)
91 Exception.__init__(self, message)
89 self.code = code
92 self.code = code
90 self.headers = headers
93 self.headers = headers
91
94
92 class continuereader(object):
95 class continuereader(object):
93 def __init__(self, f, write):
96 def __init__(self, f, write):
94 self.f = f
97 self.f = f
95 self._write = write
98 self._write = write
96 self.continued = False
99 self.continued = False
97
100
98 def read(self, amt=-1):
101 def read(self, amt=-1):
99 if not self.continued:
102 if not self.continued:
100 self.continued = True
103 self.continued = True
101 self._write('HTTP/1.1 100 Continue\r\n\r\n')
104 self._write('HTTP/1.1 100 Continue\r\n\r\n')
102 return self.f.read(amt)
105 return self.f.read(amt)
103
106
104 def __getattr__(self, attr):
107 def __getattr__(self, attr):
105 if attr in ('close', 'readline', 'readlines', '__iter__'):
108 if attr in ('close', 'readline', 'readlines', '__iter__'):
106 return getattr(self.f, attr)
109 return getattr(self.f, attr)
107 raise AttributeError
110 raise AttributeError
108
111
109 def _statusmessage(code):
112 def _statusmessage(code):
110 responses = BaseHTTPServer.BaseHTTPRequestHandler.responses
113 responses = httpserver.basehttprequesthandler.responses
111 return responses.get(code, ('Error', 'Unknown error'))[0]
114 return responses.get(code, ('Error', 'Unknown error'))[0]
112
115
113 def statusmessage(code, message=None):
116 def statusmessage(code, message=None):
114 return '%d %s' % (code, message or _statusmessage(code))
117 return '%d %s' % (code, message or _statusmessage(code))
115
118
116 def get_stat(spath, fn):
119 def get_stat(spath, fn):
117 """stat fn if it exists, spath otherwise"""
120 """stat fn if it exists, spath otherwise"""
118 cl_path = os.path.join(spath, fn)
121 cl_path = os.path.join(spath, fn)
119 if os.path.exists(cl_path):
122 if os.path.exists(cl_path):
120 return os.stat(cl_path)
123 return os.stat(cl_path)
121 else:
124 else:
122 return os.stat(spath)
125 return os.stat(spath)
123
126
124 def get_mtime(spath):
127 def get_mtime(spath):
125 return get_stat(spath, "00changelog.i").st_mtime
128 return get_stat(spath, "00changelog.i").st_mtime
126
129
127 def staticfile(directory, fname, req):
130 def staticfile(directory, fname, req):
128 """return a file inside directory with guessed Content-Type header
131 """return a file inside directory with guessed Content-Type header
129
132
130 fname always uses '/' as directory separator and isn't allowed to
133 fname always uses '/' as directory separator and isn't allowed to
131 contain unusual path components.
134 contain unusual path components.
132 Content-Type is guessed using the mimetypes module.
135 Content-Type is guessed using the mimetypes module.
133 Return an empty string if fname is illegal or file not found.
136 Return an empty string if fname is illegal or file not found.
134
137
135 """
138 """
136 parts = fname.split('/')
139 parts = fname.split('/')
137 for part in parts:
140 for part in parts:
138 if (part in ('', os.curdir, os.pardir) or
141 if (part in ('', os.curdir, os.pardir) or
139 os.sep in part or os.altsep is not None and os.altsep in part):
142 os.sep in part or os.altsep is not None and os.altsep in part):
140 return
143 return
141 fpath = os.path.join(*parts)
144 fpath = os.path.join(*parts)
142 if isinstance(directory, str):
145 if isinstance(directory, str):
143 directory = [directory]
146 directory = [directory]
144 for d in directory:
147 for d in directory:
145 path = os.path.join(d, fpath)
148 path = os.path.join(d, fpath)
146 if os.path.exists(path):
149 if os.path.exists(path):
147 break
150 break
148 try:
151 try:
149 os.stat(path)
152 os.stat(path)
150 ct = mimetypes.guess_type(path)[0] or "text/plain"
153 ct = mimetypes.guess_type(path)[0] or "text/plain"
151 fp = open(path, 'rb')
154 fp = open(path, 'rb')
152 data = fp.read()
155 data = fp.read()
153 fp.close()
156 fp.close()
154 req.respond(HTTP_OK, ct, body=data)
157 req.respond(HTTP_OK, ct, body=data)
155 except TypeError:
158 except TypeError:
156 raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
159 raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
157 except OSError as err:
160 except OSError as err:
158 if err.errno == errno.ENOENT:
161 if err.errno == errno.ENOENT:
159 raise ErrorResponse(HTTP_NOT_FOUND)
162 raise ErrorResponse(HTTP_NOT_FOUND)
160 else:
163 else:
161 raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
164 raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
162
165
163 def paritygen(stripecount, offset=0):
166 def paritygen(stripecount, offset=0):
164 """count parity of horizontal stripes for easier reading"""
167 """count parity of horizontal stripes for easier reading"""
165 if stripecount and offset:
168 if stripecount and offset:
166 # account for offset, e.g. due to building the list in reverse
169 # account for offset, e.g. due to building the list in reverse
167 count = (stripecount + offset) % stripecount
170 count = (stripecount + offset) % stripecount
168 parity = (stripecount + offset) / stripecount & 1
171 parity = (stripecount + offset) / stripecount & 1
169 else:
172 else:
170 count = 0
173 count = 0
171 parity = 0
174 parity = 0
172 while True:
175 while True:
173 yield parity
176 yield parity
174 count += 1
177 count += 1
175 if stripecount and count >= stripecount:
178 if stripecount and count >= stripecount:
176 parity = 1 - parity
179 parity = 1 - parity
177 count = 0
180 count = 0
178
181
179 def get_contact(config):
182 def get_contact(config):
180 """Return repo contact information or empty string.
183 """Return repo contact information or empty string.
181
184
182 web.contact is the primary source, but if that is not set, try
185 web.contact is the primary source, but if that is not set, try
183 ui.username or $EMAIL as a fallback to display something useful.
186 ui.username or $EMAIL as a fallback to display something useful.
184 """
187 """
185 return (config("web", "contact") or
188 return (config("web", "contact") or
186 config("ui", "username") or
189 config("ui", "username") or
187 os.environ.get("EMAIL") or "")
190 os.environ.get("EMAIL") or "")
188
191
189 def caching(web, req):
192 def caching(web, req):
190 tag = 'W/"%s"' % web.mtime
193 tag = 'W/"%s"' % web.mtime
191 if req.env.get('HTTP_IF_NONE_MATCH') == tag:
194 if req.env.get('HTTP_IF_NONE_MATCH') == tag:
192 raise ErrorResponse(HTTP_NOT_MODIFIED)
195 raise ErrorResponse(HTTP_NOT_MODIFIED)
193 req.headers.append(('ETag', tag))
196 req.headers.append(('ETag', tag))
@@ -1,334 +1,334 b''
1 # hgweb/server.py - The standalone hg web server.
1 # hgweb/server.py - The standalone hg web server.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import BaseHTTPServer
12 import errno
11 import errno
13 import os
12 import os
14 import socket
13 import socket
15 import sys
14 import sys
16 import traceback
15 import traceback
17
16
18 from ..i18n import _
17 from ..i18n import _
19
18
20 from .. import (
19 from .. import (
21 error,
20 error,
22 util,
21 util,
23 )
22 )
24
23
24 httpservermod = util.httpserver
25 socketserver = util.socketserver
25 socketserver = util.socketserver
26 urlerr = util.urlerr
26 urlerr = util.urlerr
27 urlreq = util.urlreq
27 urlreq = util.urlreq
28
28
29 from . import (
29 from . import (
30 common,
30 common,
31 )
31 )
32
32
33 def _splitURI(uri):
33 def _splitURI(uri):
34 """Return path and query that has been split from uri
34 """Return path and query that has been split from uri
35
35
36 Just like CGI environment, the path is unquoted, the query is
36 Just like CGI environment, the path is unquoted, the query is
37 not.
37 not.
38 """
38 """
39 if '?' in uri:
39 if '?' in uri:
40 path, query = uri.split('?', 1)
40 path, query = uri.split('?', 1)
41 else:
41 else:
42 path, query = uri, ''
42 path, query = uri, ''
43 return urlreq.unquote(path), query
43 return urlreq.unquote(path), query
44
44
45 class _error_logger(object):
45 class _error_logger(object):
46 def __init__(self, handler):
46 def __init__(self, handler):
47 self.handler = handler
47 self.handler = handler
48 def flush(self):
48 def flush(self):
49 pass
49 pass
50 def write(self, str):
50 def write(self, str):
51 self.writelines(str.split('\n'))
51 self.writelines(str.split('\n'))
52 def writelines(self, seq):
52 def writelines(self, seq):
53 for msg in seq:
53 for msg in seq:
54 self.handler.log_error("HG error: %s", msg)
54 self.handler.log_error("HG error: %s", msg)
55
55
56 class _httprequesthandler(BaseHTTPServer.BaseHTTPRequestHandler):
56 class _httprequesthandler(httpservermod.basehttprequesthandler):
57
57
58 url_scheme = 'http'
58 url_scheme = 'http'
59
59
60 @staticmethod
60 @staticmethod
61 def preparehttpserver(httpserver, ui):
61 def preparehttpserver(httpserver, ui):
62 """Prepare .socket of new HTTPServer instance"""
62 """Prepare .socket of new HTTPServer instance"""
63 pass
63 pass
64
64
65 def __init__(self, *args, **kargs):
65 def __init__(self, *args, **kargs):
66 self.protocol_version = 'HTTP/1.1'
66 self.protocol_version = 'HTTP/1.1'
67 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
67 httpservermod.basehttprequesthandler.__init__(self, *args, **kargs)
68
68
69 def _log_any(self, fp, format, *args):
69 def _log_any(self, fp, format, *args):
70 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
70 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
71 self.log_date_time_string(),
71 self.log_date_time_string(),
72 format % args))
72 format % args))
73 fp.flush()
73 fp.flush()
74
74
75 def log_error(self, format, *args):
75 def log_error(self, format, *args):
76 self._log_any(self.server.errorlog, format, *args)
76 self._log_any(self.server.errorlog, format, *args)
77
77
78 def log_message(self, format, *args):
78 def log_message(self, format, *args):
79 self._log_any(self.server.accesslog, format, *args)
79 self._log_any(self.server.accesslog, format, *args)
80
80
81 def log_request(self, code='-', size='-'):
81 def log_request(self, code='-', size='-'):
82 xheaders = []
82 xheaders = []
83 if util.safehasattr(self, 'headers'):
83 if util.safehasattr(self, 'headers'):
84 xheaders = [h for h in self.headers.items()
84 xheaders = [h for h in self.headers.items()
85 if h[0].startswith('x-')]
85 if h[0].startswith('x-')]
86 self.log_message('"%s" %s %s%s',
86 self.log_message('"%s" %s %s%s',
87 self.requestline, str(code), str(size),
87 self.requestline, str(code), str(size),
88 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
88 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
89
89
90 def do_write(self):
90 def do_write(self):
91 try:
91 try:
92 self.do_hgweb()
92 self.do_hgweb()
93 except socket.error as inst:
93 except socket.error as inst:
94 if inst[0] != errno.EPIPE:
94 if inst[0] != errno.EPIPE:
95 raise
95 raise
96
96
97 def do_POST(self):
97 def do_POST(self):
98 try:
98 try:
99 self.do_write()
99 self.do_write()
100 except Exception:
100 except Exception:
101 self._start_response("500 Internal Server Error", [])
101 self._start_response("500 Internal Server Error", [])
102 self._write("Internal Server Error")
102 self._write("Internal Server Error")
103 self._done()
103 self._done()
104 tb = "".join(traceback.format_exception(*sys.exc_info()))
104 tb = "".join(traceback.format_exception(*sys.exc_info()))
105 self.log_error("Exception happened during processing "
105 self.log_error("Exception happened during processing "
106 "request '%s':\n%s", self.path, tb)
106 "request '%s':\n%s", self.path, tb)
107
107
108 def do_GET(self):
108 def do_GET(self):
109 self.do_POST()
109 self.do_POST()
110
110
111 def do_hgweb(self):
111 def do_hgweb(self):
112 path, query = _splitURI(self.path)
112 path, query = _splitURI(self.path)
113
113
114 env = {}
114 env = {}
115 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
115 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
116 env['REQUEST_METHOD'] = self.command
116 env['REQUEST_METHOD'] = self.command
117 env['SERVER_NAME'] = self.server.server_name
117 env['SERVER_NAME'] = self.server.server_name
118 env['SERVER_PORT'] = str(self.server.server_port)
118 env['SERVER_PORT'] = str(self.server.server_port)
119 env['REQUEST_URI'] = self.path
119 env['REQUEST_URI'] = self.path
120 env['SCRIPT_NAME'] = self.server.prefix
120 env['SCRIPT_NAME'] = self.server.prefix
121 env['PATH_INFO'] = path[len(self.server.prefix):]
121 env['PATH_INFO'] = path[len(self.server.prefix):]
122 env['REMOTE_HOST'] = self.client_address[0]
122 env['REMOTE_HOST'] = self.client_address[0]
123 env['REMOTE_ADDR'] = self.client_address[0]
123 env['REMOTE_ADDR'] = self.client_address[0]
124 if query:
124 if query:
125 env['QUERY_STRING'] = query
125 env['QUERY_STRING'] = query
126
126
127 if self.headers.typeheader is None:
127 if self.headers.typeheader is None:
128 env['CONTENT_TYPE'] = self.headers.type
128 env['CONTENT_TYPE'] = self.headers.type
129 else:
129 else:
130 env['CONTENT_TYPE'] = self.headers.typeheader
130 env['CONTENT_TYPE'] = self.headers.typeheader
131 length = self.headers.getheader('content-length')
131 length = self.headers.getheader('content-length')
132 if length:
132 if length:
133 env['CONTENT_LENGTH'] = length
133 env['CONTENT_LENGTH'] = length
134 for header in [h for h in self.headers.keys()
134 for header in [h for h in self.headers.keys()
135 if h not in ('content-type', 'content-length')]:
135 if h not in ('content-type', 'content-length')]:
136 hkey = 'HTTP_' + header.replace('-', '_').upper()
136 hkey = 'HTTP_' + header.replace('-', '_').upper()
137 hval = self.headers.getheader(header)
137 hval = self.headers.getheader(header)
138 hval = hval.replace('\n', '').strip()
138 hval = hval.replace('\n', '').strip()
139 if hval:
139 if hval:
140 env[hkey] = hval
140 env[hkey] = hval
141 env['SERVER_PROTOCOL'] = self.request_version
141 env['SERVER_PROTOCOL'] = self.request_version
142 env['wsgi.version'] = (1, 0)
142 env['wsgi.version'] = (1, 0)
143 env['wsgi.url_scheme'] = self.url_scheme
143 env['wsgi.url_scheme'] = self.url_scheme
144 if env.get('HTTP_EXPECT', '').lower() == '100-continue':
144 if env.get('HTTP_EXPECT', '').lower() == '100-continue':
145 self.rfile = common.continuereader(self.rfile, self.wfile.write)
145 self.rfile = common.continuereader(self.rfile, self.wfile.write)
146
146
147 env['wsgi.input'] = self.rfile
147 env['wsgi.input'] = self.rfile
148 env['wsgi.errors'] = _error_logger(self)
148 env['wsgi.errors'] = _error_logger(self)
149 env['wsgi.multithread'] = isinstance(self.server,
149 env['wsgi.multithread'] = isinstance(self.server,
150 socketserver.ThreadingMixIn)
150 socketserver.ThreadingMixIn)
151 env['wsgi.multiprocess'] = isinstance(self.server,
151 env['wsgi.multiprocess'] = isinstance(self.server,
152 socketserver.ForkingMixIn)
152 socketserver.ForkingMixIn)
153 env['wsgi.run_once'] = 0
153 env['wsgi.run_once'] = 0
154
154
155 self.saved_status = None
155 self.saved_status = None
156 self.saved_headers = []
156 self.saved_headers = []
157 self.sent_headers = False
157 self.sent_headers = False
158 self.length = None
158 self.length = None
159 self._chunked = None
159 self._chunked = None
160 for chunk in self.server.application(env, self._start_response):
160 for chunk in self.server.application(env, self._start_response):
161 self._write(chunk)
161 self._write(chunk)
162 if not self.sent_headers:
162 if not self.sent_headers:
163 self.send_headers()
163 self.send_headers()
164 self._done()
164 self._done()
165
165
166 def send_headers(self):
166 def send_headers(self):
167 if not self.saved_status:
167 if not self.saved_status:
168 raise AssertionError("Sending headers before "
168 raise AssertionError("Sending headers before "
169 "start_response() called")
169 "start_response() called")
170 saved_status = self.saved_status.split(None, 1)
170 saved_status = self.saved_status.split(None, 1)
171 saved_status[0] = int(saved_status[0])
171 saved_status[0] = int(saved_status[0])
172 self.send_response(*saved_status)
172 self.send_response(*saved_status)
173 self.length = None
173 self.length = None
174 self._chunked = False
174 self._chunked = False
175 for h in self.saved_headers:
175 for h in self.saved_headers:
176 self.send_header(*h)
176 self.send_header(*h)
177 if h[0].lower() == 'content-length':
177 if h[0].lower() == 'content-length':
178 self.length = int(h[1])
178 self.length = int(h[1])
179 if (self.length is None and
179 if (self.length is None and
180 saved_status[0] != common.HTTP_NOT_MODIFIED):
180 saved_status[0] != common.HTTP_NOT_MODIFIED):
181 self._chunked = (not self.close_connection and
181 self._chunked = (not self.close_connection and
182 self.request_version == "HTTP/1.1")
182 self.request_version == "HTTP/1.1")
183 if self._chunked:
183 if self._chunked:
184 self.send_header('Transfer-Encoding', 'chunked')
184 self.send_header('Transfer-Encoding', 'chunked')
185 else:
185 else:
186 self.send_header('Connection', 'close')
186 self.send_header('Connection', 'close')
187 self.end_headers()
187 self.end_headers()
188 self.sent_headers = True
188 self.sent_headers = True
189
189
190 def _start_response(self, http_status, headers, exc_info=None):
190 def _start_response(self, http_status, headers, exc_info=None):
191 code, msg = http_status.split(None, 1)
191 code, msg = http_status.split(None, 1)
192 code = int(code)
192 code = int(code)
193 self.saved_status = http_status
193 self.saved_status = http_status
194 bad_headers = ('connection', 'transfer-encoding')
194 bad_headers = ('connection', 'transfer-encoding')
195 self.saved_headers = [h for h in headers
195 self.saved_headers = [h for h in headers
196 if h[0].lower() not in bad_headers]
196 if h[0].lower() not in bad_headers]
197 return self._write
197 return self._write
198
198
199 def _write(self, data):
199 def _write(self, data):
200 if not self.saved_status:
200 if not self.saved_status:
201 raise AssertionError("data written before start_response() called")
201 raise AssertionError("data written before start_response() called")
202 elif not self.sent_headers:
202 elif not self.sent_headers:
203 self.send_headers()
203 self.send_headers()
204 if self.length is not None:
204 if self.length is not None:
205 if len(data) > self.length:
205 if len(data) > self.length:
206 raise AssertionError("Content-length header sent, but more "
206 raise AssertionError("Content-length header sent, but more "
207 "bytes than specified are being written.")
207 "bytes than specified are being written.")
208 self.length = self.length - len(data)
208 self.length = self.length - len(data)
209 elif self._chunked and data:
209 elif self._chunked and data:
210 data = '%x\r\n%s\r\n' % (len(data), data)
210 data = '%x\r\n%s\r\n' % (len(data), data)
211 self.wfile.write(data)
211 self.wfile.write(data)
212 self.wfile.flush()
212 self.wfile.flush()
213
213
214 def _done(self):
214 def _done(self):
215 if self._chunked:
215 if self._chunked:
216 self.wfile.write('0\r\n\r\n')
216 self.wfile.write('0\r\n\r\n')
217 self.wfile.flush()
217 self.wfile.flush()
218
218
219 class _httprequesthandlerssl(_httprequesthandler):
219 class _httprequesthandlerssl(_httprequesthandler):
220 """HTTPS handler based on Python's ssl module"""
220 """HTTPS handler based on Python's ssl module"""
221
221
222 url_scheme = 'https'
222 url_scheme = 'https'
223
223
224 @staticmethod
224 @staticmethod
225 def preparehttpserver(httpserver, ui):
225 def preparehttpserver(httpserver, ui):
226 try:
226 try:
227 from .. import sslutil
227 from .. import sslutil
228 sslutil.modernssl
228 sslutil.modernssl
229 except ImportError:
229 except ImportError:
230 raise error.Abort(_("SSL support is unavailable"))
230 raise error.Abort(_("SSL support is unavailable"))
231
231
232 certfile = ui.config('web', 'certificate')
232 certfile = ui.config('web', 'certificate')
233
233
234 # These config options are currently only meant for testing. Use
234 # These config options are currently only meant for testing. Use
235 # at your own risk.
235 # at your own risk.
236 cafile = ui.config('devel', 'servercafile')
236 cafile = ui.config('devel', 'servercafile')
237 reqcert = ui.configbool('devel', 'serverrequirecert')
237 reqcert = ui.configbool('devel', 'serverrequirecert')
238
238
239 httpserver.socket = sslutil.wrapserversocket(httpserver.socket,
239 httpserver.socket = sslutil.wrapserversocket(httpserver.socket,
240 ui,
240 ui,
241 certfile=certfile,
241 certfile=certfile,
242 cafile=cafile,
242 cafile=cafile,
243 requireclientcert=reqcert)
243 requireclientcert=reqcert)
244
244
245 def setup(self):
245 def setup(self):
246 self.connection = self.request
246 self.connection = self.request
247 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
247 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
248 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
248 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
249
249
250 try:
250 try:
251 import threading
251 import threading
252 threading.activeCount() # silence pyflakes and bypass demandimport
252 threading.activeCount() # silence pyflakes and bypass demandimport
253 _mixin = socketserver.ThreadingMixIn
253 _mixin = socketserver.ThreadingMixIn
254 except ImportError:
254 except ImportError:
255 if util.safehasattr(os, "fork"):
255 if util.safehasattr(os, "fork"):
256 _mixin = socketserver.ForkingMixIn
256 _mixin = socketserver.ForkingMixIn
257 else:
257 else:
258 class _mixin(object):
258 class _mixin(object):
259 pass
259 pass
260
260
261 def openlog(opt, default):
261 def openlog(opt, default):
262 if opt and opt != '-':
262 if opt and opt != '-':
263 return open(opt, 'a')
263 return open(opt, 'a')
264 return default
264 return default
265
265
266 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
266 class MercurialHTTPServer(object, _mixin, httpservermod.httpserver):
267
267
268 # SO_REUSEADDR has broken semantics on windows
268 # SO_REUSEADDR has broken semantics on windows
269 if os.name == 'nt':
269 if os.name == 'nt':
270 allow_reuse_address = 0
270 allow_reuse_address = 0
271
271
272 def __init__(self, ui, app, addr, handler, **kwargs):
272 def __init__(self, ui, app, addr, handler, **kwargs):
273 BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs)
273 httpservermod.httpserver.__init__(self, addr, handler, **kwargs)
274 self.daemon_threads = True
274 self.daemon_threads = True
275 self.application = app
275 self.application = app
276
276
277 handler.preparehttpserver(self, ui)
277 handler.preparehttpserver(self, ui)
278
278
279 prefix = ui.config('web', 'prefix', '')
279 prefix = ui.config('web', 'prefix', '')
280 if prefix:
280 if prefix:
281 prefix = '/' + prefix.strip('/')
281 prefix = '/' + prefix.strip('/')
282 self.prefix = prefix
282 self.prefix = prefix
283
283
284 alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
284 alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
285 elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
285 elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
286 self.accesslog = alog
286 self.accesslog = alog
287 self.errorlog = elog
287 self.errorlog = elog
288
288
289 self.addr, self.port = self.socket.getsockname()[0:2]
289 self.addr, self.port = self.socket.getsockname()[0:2]
290 self.fqaddr = socket.getfqdn(addr[0])
290 self.fqaddr = socket.getfqdn(addr[0])
291
291
292 class IPv6HTTPServer(MercurialHTTPServer):
292 class IPv6HTTPServer(MercurialHTTPServer):
293 address_family = getattr(socket, 'AF_INET6', None)
293 address_family = getattr(socket, 'AF_INET6', None)
294 def __init__(self, *args, **kwargs):
294 def __init__(self, *args, **kwargs):
295 if self.address_family is None:
295 if self.address_family is None:
296 raise error.RepoError(_('IPv6 is not available on this system'))
296 raise error.RepoError(_('IPv6 is not available on this system'))
297 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
297 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
298
298
299 def create_server(ui, app):
299 def create_server(ui, app):
300
300
301 if ui.config('web', 'certificate'):
301 if ui.config('web', 'certificate'):
302 handler = _httprequesthandlerssl
302 handler = _httprequesthandlerssl
303 else:
303 else:
304 handler = _httprequesthandler
304 handler = _httprequesthandler
305
305
306 if ui.configbool('web', 'ipv6'):
306 if ui.configbool('web', 'ipv6'):
307 cls = IPv6HTTPServer
307 cls = IPv6HTTPServer
308 else:
308 else:
309 cls = MercurialHTTPServer
309 cls = MercurialHTTPServer
310
310
311 # ugly hack due to python issue5853 (for threaded use)
311 # ugly hack due to python issue5853 (for threaded use)
312 try:
312 try:
313 import mimetypes
313 import mimetypes
314 mimetypes.init()
314 mimetypes.init()
315 except UnicodeDecodeError:
315 except UnicodeDecodeError:
316 # Python 2.x's mimetypes module attempts to decode strings
316 # Python 2.x's mimetypes module attempts to decode strings
317 # from Windows' ANSI APIs as ascii (fail), then re-encode them
317 # from Windows' ANSI APIs as ascii (fail), then re-encode them
318 # as ascii (clown fail), because the default Python Unicode
318 # as ascii (clown fail), because the default Python Unicode
319 # codec is hardcoded as ascii.
319 # codec is hardcoded as ascii.
320
320
321 sys.argv # unwrap demand-loader so that reload() works
321 sys.argv # unwrap demand-loader so that reload() works
322 reload(sys) # resurrect sys.setdefaultencoding()
322 reload(sys) # resurrect sys.setdefaultencoding()
323 oldenc = sys.getdefaultencoding()
323 oldenc = sys.getdefaultencoding()
324 sys.setdefaultencoding("latin1") # or any full 8-bit encoding
324 sys.setdefaultencoding("latin1") # or any full 8-bit encoding
325 mimetypes.init()
325 mimetypes.init()
326 sys.setdefaultencoding(oldenc)
326 sys.setdefaultencoding(oldenc)
327
327
328 address = ui.config('web', 'address', '')
328 address = ui.config('web', 'address', '')
329 port = util.getport(ui.config('web', 'port', 8000))
329 port = util.getport(ui.config('web', 'port', 8000))
330 try:
330 try:
331 return cls(ui, app, (address, port), handler)
331 return cls(ui, app, (address, port), handler)
332 except socket.error as inst:
332 except socket.error as inst:
333 raise error.Abort(_("cannot start server at '%s:%d': %s")
333 raise error.Abort(_("cannot start server at '%s:%d': %s")
334 % (address, port, inst.args[1]))
334 % (address, port, inst.args[1]))
@@ -1,159 +1,180 b''
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 try:
13 try:
14 import cPickle as pickle
14 import cPickle as pickle
15 pickle.dumps
15 pickle.dumps
16 except ImportError:
16 except ImportError:
17 import pickle
17 import pickle
18 pickle.dumps # silence pyflakes
18 pickle.dumps # silence pyflakes
19
19
20 try:
20 try:
21 import httplib
21 import httplib
22 httplib.HTTPException
22 httplib.HTTPException
23 except ImportError:
23 except ImportError:
24 import http.client as httplib
24 import http.client as httplib
25 httplib.HTTPException
25 httplib.HTTPException
26
26
27 try:
27 try:
28 import SocketServer as socketserver
28 import SocketServer as socketserver
29 socketserver.ThreadingMixIn
29 socketserver.ThreadingMixIn
30 except ImportError:
30 except ImportError:
31 import socketserver
31 import socketserver
32 socketserver.ThreadingMixIn
32 socketserver.ThreadingMixIn
33
33
34 try:
34 try:
35 import xmlrpclib
35 import xmlrpclib
36 xmlrpclib.Transport
36 xmlrpclib.Transport
37 except ImportError:
37 except ImportError:
38 import xmlrpc.client as xmlrpclib
38 import xmlrpc.client as xmlrpclib
39 xmlrpclib.Transport
39 xmlrpclib.Transport
40
40
41 try:
41 try:
42 import urlparse
42 import urlparse
43 urlparse.urlparse
43 urlparse.urlparse
44 except ImportError:
44 except ImportError:
45 import urllib.parse as urlparse
45 import urllib.parse as urlparse
46 urlparse.urlparse
46 urlparse.urlparse
47
47
48 try:
48 try:
49 import cStringIO as io
49 import cStringIO as io
50 stringio = io.StringIO
50 stringio = io.StringIO
51 except ImportError:
51 except ImportError:
52 import io
52 import io
53 stringio = io.StringIO
53 stringio = io.StringIO
54
54
55 try:
55 try:
56 import Queue as _queue
56 import Queue as _queue
57 _queue.Queue
57 _queue.Queue
58 except ImportError:
58 except ImportError:
59 import queue as _queue
59 import queue as _queue
60 empty = _queue.Empty
60 empty = _queue.Empty
61 queue = _queue.Queue
61 queue = _queue.Queue
62
62
63 class _pycompatstub(object):
63 class _pycompatstub(object):
64 pass
64 pass
65
65
66 def _alias(alias, origin, items):
66 def _alias(alias, origin, items):
67 """ populate a _pycompatstub
67 """ populate a _pycompatstub
68
68
69 copies items from origin to alias
69 copies items from origin to alias
70 """
70 """
71 def hgcase(item):
71 def hgcase(item):
72 return item.replace('_', '').lower()
72 return item.replace('_', '').lower()
73 for item in items:
73 for item in items:
74 try:
74 try:
75 setattr(alias, hgcase(item), getattr(origin, item))
75 setattr(alias, hgcase(item), getattr(origin, item))
76 except AttributeError:
76 except AttributeError:
77 pass
77 pass
78
78
79 httpserver = _pycompatstub()
79 urlreq = _pycompatstub()
80 urlreq = _pycompatstub()
80 urlerr = _pycompatstub()
81 urlerr = _pycompatstub()
81 try:
82 try:
83 import BaseHTTPServer
84 import CGIHTTPServer
85 import SimpleHTTPServer
82 import urllib2
86 import urllib2
83 import urllib
87 import urllib
84 _alias(urlreq, urllib, (
88 _alias(urlreq, urllib, (
85 "addclosehook",
89 "addclosehook",
86 "addinfourl",
90 "addinfourl",
87 "ftpwrapper",
91 "ftpwrapper",
88 "pathname2url",
92 "pathname2url",
89 "quote",
93 "quote",
90 "splitattr",
94 "splitattr",
91 "splitpasswd",
95 "splitpasswd",
92 "splitport",
96 "splitport",
93 "splituser",
97 "splituser",
94 "unquote",
98 "unquote",
95 "url2pathname",
99 "url2pathname",
96 "urlencode",
100 "urlencode",
97 "urlencode",
101 "urlencode",
98 ))
102 ))
99 _alias(urlreq, urllib2, (
103 _alias(urlreq, urllib2, (
100 "AbstractHTTPHandler",
104 "AbstractHTTPHandler",
101 "BaseHandler",
105 "BaseHandler",
102 "build_opener",
106 "build_opener",
103 "FileHandler",
107 "FileHandler",
104 "FTPHandler",
108 "FTPHandler",
105 "HTTPBasicAuthHandler",
109 "HTTPBasicAuthHandler",
106 "HTTPDigestAuthHandler",
110 "HTTPDigestAuthHandler",
107 "HTTPHandler",
111 "HTTPHandler",
108 "HTTPPasswordMgrWithDefaultRealm",
112 "HTTPPasswordMgrWithDefaultRealm",
109 "HTTPSHandler",
113 "HTTPSHandler",
110 "install_opener",
114 "install_opener",
111 "ProxyHandler",
115 "ProxyHandler",
112 "Request",
116 "Request",
113 "urlopen",
117 "urlopen",
114 ))
118 ))
115 _alias(urlerr, urllib2, (
119 _alias(urlerr, urllib2, (
116 "HTTPError",
120 "HTTPError",
117 "URLError",
121 "URLError",
118 ))
122 ))
123 _alias(httpserver, BaseHTTPServer, (
124 "HTTPServer",
125 "BaseHTTPRequestHandler",
126 ))
127 _alias(httpserver, SimpleHTTPServer, (
128 "SimpleHTTPRequestHandler",
129 ))
130 _alias(httpserver, CGIHTTPServer, (
131 "CGIHTTPRequestHandler",
132 ))
119
133
120 except ImportError:
134 except ImportError:
121 import urllib.request
135 import urllib.request
122 _alias(urlreq, urllib.request, (
136 _alias(urlreq, urllib.request, (
123 "AbstractHTTPHandler",
137 "AbstractHTTPHandler",
124 "addclosehook",
138 "addclosehook",
125 "addinfourl",
139 "addinfourl",
126 "BaseHandler",
140 "BaseHandler",
127 "build_opener",
141 "build_opener",
128 "FileHandler",
142 "FileHandler",
129 "FTPHandler",
143 "FTPHandler",
130 "ftpwrapper",
144 "ftpwrapper",
131 "HTTPHandler",
145 "HTTPHandler",
132 "HTTPSHandler",
146 "HTTPSHandler",
133 "install_opener",
147 "install_opener",
134 "pathname2url",
148 "pathname2url",
135 "HTTPBasicAuthHandler",
149 "HTTPBasicAuthHandler",
136 "HTTPDigestAuthHandler",
150 "HTTPDigestAuthHandler",
137 "HTTPPasswordMgrWithDefaultRealm",
151 "HTTPPasswordMgrWithDefaultRealm",
138 "ProxyHandler",
152 "ProxyHandler",
139 "quote",
153 "quote",
140 "Request",
154 "Request",
141 "splitattr",
155 "splitattr",
142 "splitpasswd",
156 "splitpasswd",
143 "splitport",
157 "splitport",
144 "splituser",
158 "splituser",
145 "unquote",
159 "unquote",
146 "url2pathname",
160 "url2pathname",
147 "urlopen",
161 "urlopen",
148 ))
162 ))
149 import urllib.error
163 import urllib.error
150 _alias(urlerr, urllib.error, (
164 _alias(urlerr, urllib.error, (
151 "HTTPError",
165 "HTTPError",
152 "URLError",
166 "URLError",
153 ))
167 ))
168 import http.server
169 _alias(httpserver, http.server, (
170 "HTTPServer",
171 "BaseHTTPRequestHandler",
172 "SimpleHTTPRequestHandler",
173 "CGIHTTPRequestHandler",
174 ))
154
175
155 try:
176 try:
156 xrange
177 xrange
157 except NameError:
178 except NameError:
158 import builtins
179 import builtins
159 builtins.xrange = range
180 builtins.xrange = range
@@ -1,2858 +1,2859 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'httplib',
50 'httplib',
51 'httpserver',
51 'pickle',
52 'pickle',
52 'queue',
53 'queue',
53 'urlerr',
54 'urlerr',
54 'urlparse',
55 'urlparse',
55 # we do import urlreq, but we do it outside the loop
56 # we do import urlreq, but we do it outside the loop
56 #'urlreq',
57 #'urlreq',
57 'stringio',
58 'stringio',
58 'socketserver',
59 'socketserver',
59 'xmlrpclib',
60 'xmlrpclib',
60 ):
61 ):
61 globals()[attr] = getattr(pycompat, attr)
62 globals()[attr] = getattr(pycompat, attr)
62
63
63 # This line is to make pyflakes happy:
64 # This line is to make pyflakes happy:
64 urlreq = pycompat.urlreq
65 urlreq = pycompat.urlreq
65
66
66 if os.name == 'nt':
67 if os.name == 'nt':
67 from . import windows as platform
68 from . import windows as platform
68 else:
69 else:
69 from . import posix as platform
70 from . import posix as platform
70
71
71 _ = i18n._
72 _ = i18n._
72
73
73 bindunixsocket = platform.bindunixsocket
74 bindunixsocket = platform.bindunixsocket
74 cachestat = platform.cachestat
75 cachestat = platform.cachestat
75 checkexec = platform.checkexec
76 checkexec = platform.checkexec
76 checklink = platform.checklink
77 checklink = platform.checklink
77 copymode = platform.copymode
78 copymode = platform.copymode
78 executablepath = platform.executablepath
79 executablepath = platform.executablepath
79 expandglobs = platform.expandglobs
80 expandglobs = platform.expandglobs
80 explainexit = platform.explainexit
81 explainexit = platform.explainexit
81 findexe = platform.findexe
82 findexe = platform.findexe
82 gethgcmd = platform.gethgcmd
83 gethgcmd = platform.gethgcmd
83 getuser = platform.getuser
84 getuser = platform.getuser
84 getpid = os.getpid
85 getpid = os.getpid
85 groupmembers = platform.groupmembers
86 groupmembers = platform.groupmembers
86 groupname = platform.groupname
87 groupname = platform.groupname
87 hidewindow = platform.hidewindow
88 hidewindow = platform.hidewindow
88 isexec = platform.isexec
89 isexec = platform.isexec
89 isowner = platform.isowner
90 isowner = platform.isowner
90 localpath = platform.localpath
91 localpath = platform.localpath
91 lookupreg = platform.lookupreg
92 lookupreg = platform.lookupreg
92 makedir = platform.makedir
93 makedir = platform.makedir
93 nlinks = platform.nlinks
94 nlinks = platform.nlinks
94 normpath = platform.normpath
95 normpath = platform.normpath
95 normcase = platform.normcase
96 normcase = platform.normcase
96 normcasespec = platform.normcasespec
97 normcasespec = platform.normcasespec
97 normcasefallback = platform.normcasefallback
98 normcasefallback = platform.normcasefallback
98 openhardlinks = platform.openhardlinks
99 openhardlinks = platform.openhardlinks
99 oslink = platform.oslink
100 oslink = platform.oslink
100 parsepatchoutput = platform.parsepatchoutput
101 parsepatchoutput = platform.parsepatchoutput
101 pconvert = platform.pconvert
102 pconvert = platform.pconvert
102 poll = platform.poll
103 poll = platform.poll
103 popen = platform.popen
104 popen = platform.popen
104 posixfile = platform.posixfile
105 posixfile = platform.posixfile
105 quotecommand = platform.quotecommand
106 quotecommand = platform.quotecommand
106 readpipe = platform.readpipe
107 readpipe = platform.readpipe
107 rename = platform.rename
108 rename = platform.rename
108 removedirs = platform.removedirs
109 removedirs = platform.removedirs
109 samedevice = platform.samedevice
110 samedevice = platform.samedevice
110 samefile = platform.samefile
111 samefile = platform.samefile
111 samestat = platform.samestat
112 samestat = platform.samestat
112 setbinary = platform.setbinary
113 setbinary = platform.setbinary
113 setflags = platform.setflags
114 setflags = platform.setflags
114 setsignalhandler = platform.setsignalhandler
115 setsignalhandler = platform.setsignalhandler
115 shellquote = platform.shellquote
116 shellquote = platform.shellquote
116 spawndetached = platform.spawndetached
117 spawndetached = platform.spawndetached
117 split = platform.split
118 split = platform.split
118 sshargs = platform.sshargs
119 sshargs = platform.sshargs
119 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
120 statisexec = platform.statisexec
121 statisexec = platform.statisexec
121 statislink = platform.statislink
122 statislink = platform.statislink
122 termwidth = platform.termwidth
123 termwidth = platform.termwidth
123 testpid = platform.testpid
124 testpid = platform.testpid
124 umask = platform.umask
125 umask = platform.umask
125 unlink = platform.unlink
126 unlink = platform.unlink
126 unlinkpath = platform.unlinkpath
127 unlinkpath = platform.unlinkpath
127 username = platform.username
128 username = platform.username
128
129
129 # Python compatibility
130 # Python compatibility
130
131
131 _notset = object()
132 _notset = object()
132
133
133 # disable Python's problematic floating point timestamps (issue4836)
134 # disable Python's problematic floating point timestamps (issue4836)
134 # (Python hypocritically says you shouldn't change this behavior in
135 # (Python hypocritically says you shouldn't change this behavior in
135 # libraries, and sure enough Mercurial is not a library.)
136 # libraries, and sure enough Mercurial is not a library.)
136 os.stat_float_times(False)
137 os.stat_float_times(False)
137
138
138 def safehasattr(thing, attr):
139 def safehasattr(thing, attr):
139 return getattr(thing, attr, _notset) is not _notset
140 return getattr(thing, attr, _notset) is not _notset
140
141
141 DIGESTS = {
142 DIGESTS = {
142 'md5': hashlib.md5,
143 'md5': hashlib.md5,
143 'sha1': hashlib.sha1,
144 'sha1': hashlib.sha1,
144 'sha512': hashlib.sha512,
145 'sha512': hashlib.sha512,
145 }
146 }
146 # List of digest types from strongest to weakest
147 # List of digest types from strongest to weakest
147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
148 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
148
149
149 for k in DIGESTS_BY_STRENGTH:
150 for k in DIGESTS_BY_STRENGTH:
150 assert k in DIGESTS
151 assert k in DIGESTS
151
152
152 class digester(object):
153 class digester(object):
153 """helper to compute digests.
154 """helper to compute digests.
154
155
155 This helper can be used to compute one or more digests given their name.
156 This helper can be used to compute one or more digests given their name.
156
157
157 >>> d = digester(['md5', 'sha1'])
158 >>> d = digester(['md5', 'sha1'])
158 >>> d.update('foo')
159 >>> d.update('foo')
159 >>> [k for k in sorted(d)]
160 >>> [k for k in sorted(d)]
160 ['md5', 'sha1']
161 ['md5', 'sha1']
161 >>> d['md5']
162 >>> d['md5']
162 'acbd18db4cc2f85cedef654fccc4a4d8'
163 'acbd18db4cc2f85cedef654fccc4a4d8'
163 >>> d['sha1']
164 >>> d['sha1']
164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
165 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
165 >>> digester.preferred(['md5', 'sha1'])
166 >>> digester.preferred(['md5', 'sha1'])
166 'sha1'
167 'sha1'
167 """
168 """
168
169
169 def __init__(self, digests, s=''):
170 def __init__(self, digests, s=''):
170 self._hashes = {}
171 self._hashes = {}
171 for k in digests:
172 for k in digests:
172 if k not in DIGESTS:
173 if k not in DIGESTS:
173 raise Abort(_('unknown digest type: %s') % k)
174 raise Abort(_('unknown digest type: %s') % k)
174 self._hashes[k] = DIGESTS[k]()
175 self._hashes[k] = DIGESTS[k]()
175 if s:
176 if s:
176 self.update(s)
177 self.update(s)
177
178
178 def update(self, data):
179 def update(self, data):
179 for h in self._hashes.values():
180 for h in self._hashes.values():
180 h.update(data)
181 h.update(data)
181
182
182 def __getitem__(self, key):
183 def __getitem__(self, key):
183 if key not in DIGESTS:
184 if key not in DIGESTS:
184 raise Abort(_('unknown digest type: %s') % k)
185 raise Abort(_('unknown digest type: %s') % k)
185 return self._hashes[key].hexdigest()
186 return self._hashes[key].hexdigest()
186
187
187 def __iter__(self):
188 def __iter__(self):
188 return iter(self._hashes)
189 return iter(self._hashes)
189
190
190 @staticmethod
191 @staticmethod
191 def preferred(supported):
192 def preferred(supported):
192 """returns the strongest digest type in both supported and DIGESTS."""
193 """returns the strongest digest type in both supported and DIGESTS."""
193
194
194 for k in DIGESTS_BY_STRENGTH:
195 for k in DIGESTS_BY_STRENGTH:
195 if k in supported:
196 if k in supported:
196 return k
197 return k
197 return None
198 return None
198
199
199 class digestchecker(object):
200 class digestchecker(object):
200 """file handle wrapper that additionally checks content against a given
201 """file handle wrapper that additionally checks content against a given
201 size and digests.
202 size and digests.
202
203
203 d = digestchecker(fh, size, {'md5': '...'})
204 d = digestchecker(fh, size, {'md5': '...'})
204
205
205 When multiple digests are given, all of them are validated.
206 When multiple digests are given, all of them are validated.
206 """
207 """
207
208
208 def __init__(self, fh, size, digests):
209 def __init__(self, fh, size, digests):
209 self._fh = fh
210 self._fh = fh
210 self._size = size
211 self._size = size
211 self._got = 0
212 self._got = 0
212 self._digests = dict(digests)
213 self._digests = dict(digests)
213 self._digester = digester(self._digests.keys())
214 self._digester = digester(self._digests.keys())
214
215
215 def read(self, length=-1):
216 def read(self, length=-1):
216 content = self._fh.read(length)
217 content = self._fh.read(length)
217 self._digester.update(content)
218 self._digester.update(content)
218 self._got += len(content)
219 self._got += len(content)
219 return content
220 return content
220
221
221 def validate(self):
222 def validate(self):
222 if self._size != self._got:
223 if self._size != self._got:
223 raise Abort(_('size mismatch: expected %d, got %d') %
224 raise Abort(_('size mismatch: expected %d, got %d') %
224 (self._size, self._got))
225 (self._size, self._got))
225 for k, v in self._digests.items():
226 for k, v in self._digests.items():
226 if v != self._digester[k]:
227 if v != self._digester[k]:
227 # i18n: first parameter is a digest name
228 # i18n: first parameter is a digest name
228 raise Abort(_('%s mismatch: expected %s, got %s') %
229 raise Abort(_('%s mismatch: expected %s, got %s') %
229 (k, v, self._digester[k]))
230 (k, v, self._digester[k]))
230
231
231 try:
232 try:
232 buffer = buffer
233 buffer = buffer
233 except NameError:
234 except NameError:
234 if sys.version_info[0] < 3:
235 if sys.version_info[0] < 3:
235 def buffer(sliceable, offset=0):
236 def buffer(sliceable, offset=0):
236 return sliceable[offset:]
237 return sliceable[offset:]
237 else:
238 else:
238 def buffer(sliceable, offset=0):
239 def buffer(sliceable, offset=0):
239 return memoryview(sliceable)[offset:]
240 return memoryview(sliceable)[offset:]
240
241
241 closefds = os.name == 'posix'
242 closefds = os.name == 'posix'
242
243
243 _chunksize = 4096
244 _chunksize = 4096
244
245
245 class bufferedinputpipe(object):
246 class bufferedinputpipe(object):
246 """a manually buffered input pipe
247 """a manually buffered input pipe
247
248
248 Python will not let us use buffered IO and lazy reading with 'polling' at
249 Python will not let us use buffered IO and lazy reading with 'polling' at
249 the same time. We cannot probe the buffer state and select will not detect
250 the same time. We cannot probe the buffer state and select will not detect
250 that data are ready to read if they are already buffered.
251 that data are ready to read if they are already buffered.
251
252
252 This class let us work around that by implementing its own buffering
253 This class let us work around that by implementing its own buffering
253 (allowing efficient readline) while offering a way to know if the buffer is
254 (allowing efficient readline) while offering a way to know if the buffer is
254 empty from the output (allowing collaboration of the buffer with polling).
255 empty from the output (allowing collaboration of the buffer with polling).
255
256
256 This class lives in the 'util' module because it makes use of the 'os'
257 This class lives in the 'util' module because it makes use of the 'os'
257 module from the python stdlib.
258 module from the python stdlib.
258 """
259 """
259
260
260 def __init__(self, input):
261 def __init__(self, input):
261 self._input = input
262 self._input = input
262 self._buffer = []
263 self._buffer = []
263 self._eof = False
264 self._eof = False
264 self._lenbuf = 0
265 self._lenbuf = 0
265
266
266 @property
267 @property
267 def hasbuffer(self):
268 def hasbuffer(self):
268 """True is any data is currently buffered
269 """True is any data is currently buffered
269
270
270 This will be used externally a pre-step for polling IO. If there is
271 This will be used externally a pre-step for polling IO. If there is
271 already data then no polling should be set in place."""
272 already data then no polling should be set in place."""
272 return bool(self._buffer)
273 return bool(self._buffer)
273
274
274 @property
275 @property
275 def closed(self):
276 def closed(self):
276 return self._input.closed
277 return self._input.closed
277
278
278 def fileno(self):
279 def fileno(self):
279 return self._input.fileno()
280 return self._input.fileno()
280
281
281 def close(self):
282 def close(self):
282 return self._input.close()
283 return self._input.close()
283
284
284 def read(self, size):
285 def read(self, size):
285 while (not self._eof) and (self._lenbuf < size):
286 while (not self._eof) and (self._lenbuf < size):
286 self._fillbuffer()
287 self._fillbuffer()
287 return self._frombuffer(size)
288 return self._frombuffer(size)
288
289
289 def readline(self, *args, **kwargs):
290 def readline(self, *args, **kwargs):
290 if 1 < len(self._buffer):
291 if 1 < len(self._buffer):
291 # this should not happen because both read and readline end with a
292 # this should not happen because both read and readline end with a
292 # _frombuffer call that collapse it.
293 # _frombuffer call that collapse it.
293 self._buffer = [''.join(self._buffer)]
294 self._buffer = [''.join(self._buffer)]
294 self._lenbuf = len(self._buffer[0])
295 self._lenbuf = len(self._buffer[0])
295 lfi = -1
296 lfi = -1
296 if self._buffer:
297 if self._buffer:
297 lfi = self._buffer[-1].find('\n')
298 lfi = self._buffer[-1].find('\n')
298 while (not self._eof) and lfi < 0:
299 while (not self._eof) and lfi < 0:
299 self._fillbuffer()
300 self._fillbuffer()
300 if self._buffer:
301 if self._buffer:
301 lfi = self._buffer[-1].find('\n')
302 lfi = self._buffer[-1].find('\n')
302 size = lfi + 1
303 size = lfi + 1
303 if lfi < 0: # end of file
304 if lfi < 0: # end of file
304 size = self._lenbuf
305 size = self._lenbuf
305 elif 1 < len(self._buffer):
306 elif 1 < len(self._buffer):
306 # we need to take previous chunks into account
307 # we need to take previous chunks into account
307 size += self._lenbuf - len(self._buffer[-1])
308 size += self._lenbuf - len(self._buffer[-1])
308 return self._frombuffer(size)
309 return self._frombuffer(size)
309
310
310 def _frombuffer(self, size):
311 def _frombuffer(self, size):
311 """return at most 'size' data from the buffer
312 """return at most 'size' data from the buffer
312
313
313 The data are removed from the buffer."""
314 The data are removed from the buffer."""
314 if size == 0 or not self._buffer:
315 if size == 0 or not self._buffer:
315 return ''
316 return ''
316 buf = self._buffer[0]
317 buf = self._buffer[0]
317 if 1 < len(self._buffer):
318 if 1 < len(self._buffer):
318 buf = ''.join(self._buffer)
319 buf = ''.join(self._buffer)
319
320
320 data = buf[:size]
321 data = buf[:size]
321 buf = buf[len(data):]
322 buf = buf[len(data):]
322 if buf:
323 if buf:
323 self._buffer = [buf]
324 self._buffer = [buf]
324 self._lenbuf = len(buf)
325 self._lenbuf = len(buf)
325 else:
326 else:
326 self._buffer = []
327 self._buffer = []
327 self._lenbuf = 0
328 self._lenbuf = 0
328 return data
329 return data
329
330
330 def _fillbuffer(self):
331 def _fillbuffer(self):
331 """read data to the buffer"""
332 """read data to the buffer"""
332 data = os.read(self._input.fileno(), _chunksize)
333 data = os.read(self._input.fileno(), _chunksize)
333 if not data:
334 if not data:
334 self._eof = True
335 self._eof = True
335 else:
336 else:
336 self._lenbuf += len(data)
337 self._lenbuf += len(data)
337 self._buffer.append(data)
338 self._buffer.append(data)
338
339
339 def popen2(cmd, env=None, newlines=False):
340 def popen2(cmd, env=None, newlines=False):
340 # Setting bufsize to -1 lets the system decide the buffer size.
341 # Setting bufsize to -1 lets the system decide the buffer size.
341 # The default for bufsize is 0, meaning unbuffered. This leads to
342 # The default for bufsize is 0, meaning unbuffered. This leads to
342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
343 # poor performance on Mac OS X: http://bugs.python.org/issue4194
343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
344 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
344 close_fds=closefds,
345 close_fds=closefds,
345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
346 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
346 universal_newlines=newlines,
347 universal_newlines=newlines,
347 env=env)
348 env=env)
348 return p.stdin, p.stdout
349 return p.stdin, p.stdout
349
350
350 def popen3(cmd, env=None, newlines=False):
351 def popen3(cmd, env=None, newlines=False):
351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
352 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
352 return stdin, stdout, stderr
353 return stdin, stdout, stderr
353
354
354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
355 def popen4(cmd, env=None, newlines=False, bufsize=-1):
355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
356 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
356 close_fds=closefds,
357 close_fds=closefds,
357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
358 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
358 stderr=subprocess.PIPE,
359 stderr=subprocess.PIPE,
359 universal_newlines=newlines,
360 universal_newlines=newlines,
360 env=env)
361 env=env)
361 return p.stdin, p.stdout, p.stderr, p
362 return p.stdin, p.stdout, p.stderr, p
362
363
363 def version():
364 def version():
364 """Return version information if available."""
365 """Return version information if available."""
365 try:
366 try:
366 from . import __version__
367 from . import __version__
367 return __version__.version
368 return __version__.version
368 except ImportError:
369 except ImportError:
369 return 'unknown'
370 return 'unknown'
370
371
371 def versiontuple(v=None, n=4):
372 def versiontuple(v=None, n=4):
372 """Parses a Mercurial version string into an N-tuple.
373 """Parses a Mercurial version string into an N-tuple.
373
374
374 The version string to be parsed is specified with the ``v`` argument.
375 The version string to be parsed is specified with the ``v`` argument.
375 If it isn't defined, the current Mercurial version string will be parsed.
376 If it isn't defined, the current Mercurial version string will be parsed.
376
377
377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
378 ``n`` can be 2, 3, or 4. Here is how some version strings map to
378 returned values:
379 returned values:
379
380
380 >>> v = '3.6.1+190-df9b73d2d444'
381 >>> v = '3.6.1+190-df9b73d2d444'
381 >>> versiontuple(v, 2)
382 >>> versiontuple(v, 2)
382 (3, 6)
383 (3, 6)
383 >>> versiontuple(v, 3)
384 >>> versiontuple(v, 3)
384 (3, 6, 1)
385 (3, 6, 1)
385 >>> versiontuple(v, 4)
386 >>> versiontuple(v, 4)
386 (3, 6, 1, '190-df9b73d2d444')
387 (3, 6, 1, '190-df9b73d2d444')
387
388
388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
389 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
389 (3, 6, 1, '190-df9b73d2d444+20151118')
390 (3, 6, 1, '190-df9b73d2d444+20151118')
390
391
391 >>> v = '3.6'
392 >>> v = '3.6'
392 >>> versiontuple(v, 2)
393 >>> versiontuple(v, 2)
393 (3, 6)
394 (3, 6)
394 >>> versiontuple(v, 3)
395 >>> versiontuple(v, 3)
395 (3, 6, None)
396 (3, 6, None)
396 >>> versiontuple(v, 4)
397 >>> versiontuple(v, 4)
397 (3, 6, None, None)
398 (3, 6, None, None)
398 """
399 """
399 if not v:
400 if not v:
400 v = version()
401 v = version()
401 parts = v.split('+', 1)
402 parts = v.split('+', 1)
402 if len(parts) == 1:
403 if len(parts) == 1:
403 vparts, extra = parts[0], None
404 vparts, extra = parts[0], None
404 else:
405 else:
405 vparts, extra = parts
406 vparts, extra = parts
406
407
407 vints = []
408 vints = []
408 for i in vparts.split('.'):
409 for i in vparts.split('.'):
409 try:
410 try:
410 vints.append(int(i))
411 vints.append(int(i))
411 except ValueError:
412 except ValueError:
412 break
413 break
413 # (3, 6) -> (3, 6, None)
414 # (3, 6) -> (3, 6, None)
414 while len(vints) < 3:
415 while len(vints) < 3:
415 vints.append(None)
416 vints.append(None)
416
417
417 if n == 2:
418 if n == 2:
418 return (vints[0], vints[1])
419 return (vints[0], vints[1])
419 if n == 3:
420 if n == 3:
420 return (vints[0], vints[1], vints[2])
421 return (vints[0], vints[1], vints[2])
421 if n == 4:
422 if n == 4:
422 return (vints[0], vints[1], vints[2], extra)
423 return (vints[0], vints[1], vints[2], extra)
423
424
424 # used by parsedate
425 # used by parsedate
425 defaultdateformats = (
426 defaultdateformats = (
426 '%Y-%m-%d %H:%M:%S',
427 '%Y-%m-%d %H:%M:%S',
427 '%Y-%m-%d %I:%M:%S%p',
428 '%Y-%m-%d %I:%M:%S%p',
428 '%Y-%m-%d %H:%M',
429 '%Y-%m-%d %H:%M',
429 '%Y-%m-%d %I:%M%p',
430 '%Y-%m-%d %I:%M%p',
430 '%Y-%m-%d',
431 '%Y-%m-%d',
431 '%m-%d',
432 '%m-%d',
432 '%m/%d',
433 '%m/%d',
433 '%m/%d/%y',
434 '%m/%d/%y',
434 '%m/%d/%Y',
435 '%m/%d/%Y',
435 '%a %b %d %H:%M:%S %Y',
436 '%a %b %d %H:%M:%S %Y',
436 '%a %b %d %I:%M:%S%p %Y',
437 '%a %b %d %I:%M:%S%p %Y',
437 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
438 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
438 '%b %d %H:%M:%S %Y',
439 '%b %d %H:%M:%S %Y',
439 '%b %d %I:%M:%S%p %Y',
440 '%b %d %I:%M:%S%p %Y',
440 '%b %d %H:%M:%S',
441 '%b %d %H:%M:%S',
441 '%b %d %I:%M:%S%p',
442 '%b %d %I:%M:%S%p',
442 '%b %d %H:%M',
443 '%b %d %H:%M',
443 '%b %d %I:%M%p',
444 '%b %d %I:%M%p',
444 '%b %d %Y',
445 '%b %d %Y',
445 '%b %d',
446 '%b %d',
446 '%H:%M:%S',
447 '%H:%M:%S',
447 '%I:%M:%S%p',
448 '%I:%M:%S%p',
448 '%H:%M',
449 '%H:%M',
449 '%I:%M%p',
450 '%I:%M%p',
450 )
451 )
451
452
452 extendeddateformats = defaultdateformats + (
453 extendeddateformats = defaultdateformats + (
453 "%Y",
454 "%Y",
454 "%Y-%m",
455 "%Y-%m",
455 "%b",
456 "%b",
456 "%b %Y",
457 "%b %Y",
457 )
458 )
458
459
459 def cachefunc(func):
460 def cachefunc(func):
460 '''cache the result of function calls'''
461 '''cache the result of function calls'''
461 # XXX doesn't handle keywords args
462 # XXX doesn't handle keywords args
462 if func.__code__.co_argcount == 0:
463 if func.__code__.co_argcount == 0:
463 cache = []
464 cache = []
464 def f():
465 def f():
465 if len(cache) == 0:
466 if len(cache) == 0:
466 cache.append(func())
467 cache.append(func())
467 return cache[0]
468 return cache[0]
468 return f
469 return f
469 cache = {}
470 cache = {}
470 if func.__code__.co_argcount == 1:
471 if func.__code__.co_argcount == 1:
471 # we gain a small amount of time because
472 # we gain a small amount of time because
472 # we don't need to pack/unpack the list
473 # we don't need to pack/unpack the list
473 def f(arg):
474 def f(arg):
474 if arg not in cache:
475 if arg not in cache:
475 cache[arg] = func(arg)
476 cache[arg] = func(arg)
476 return cache[arg]
477 return cache[arg]
477 else:
478 else:
478 def f(*args):
479 def f(*args):
479 if args not in cache:
480 if args not in cache:
480 cache[args] = func(*args)
481 cache[args] = func(*args)
481 return cache[args]
482 return cache[args]
482
483
483 return f
484 return f
484
485
485 class sortdict(dict):
486 class sortdict(dict):
486 '''a simple sorted dictionary'''
487 '''a simple sorted dictionary'''
487 def __init__(self, data=None):
488 def __init__(self, data=None):
488 self._list = []
489 self._list = []
489 if data:
490 if data:
490 self.update(data)
491 self.update(data)
491 def copy(self):
492 def copy(self):
492 return sortdict(self)
493 return sortdict(self)
493 def __setitem__(self, key, val):
494 def __setitem__(self, key, val):
494 if key in self:
495 if key in self:
495 self._list.remove(key)
496 self._list.remove(key)
496 self._list.append(key)
497 self._list.append(key)
497 dict.__setitem__(self, key, val)
498 dict.__setitem__(self, key, val)
498 def __iter__(self):
499 def __iter__(self):
499 return self._list.__iter__()
500 return self._list.__iter__()
500 def update(self, src):
501 def update(self, src):
501 if isinstance(src, dict):
502 if isinstance(src, dict):
502 src = src.iteritems()
503 src = src.iteritems()
503 for k, v in src:
504 for k, v in src:
504 self[k] = v
505 self[k] = v
505 def clear(self):
506 def clear(self):
506 dict.clear(self)
507 dict.clear(self)
507 self._list = []
508 self._list = []
508 def items(self):
509 def items(self):
509 return [(k, self[k]) for k in self._list]
510 return [(k, self[k]) for k in self._list]
510 def __delitem__(self, key):
511 def __delitem__(self, key):
511 dict.__delitem__(self, key)
512 dict.__delitem__(self, key)
512 self._list.remove(key)
513 self._list.remove(key)
513 def pop(self, key, *args, **kwargs):
514 def pop(self, key, *args, **kwargs):
514 dict.pop(self, key, *args, **kwargs)
515 dict.pop(self, key, *args, **kwargs)
515 try:
516 try:
516 self._list.remove(key)
517 self._list.remove(key)
517 except ValueError:
518 except ValueError:
518 pass
519 pass
519 def keys(self):
520 def keys(self):
520 return self._list
521 return self._list
521 def iterkeys(self):
522 def iterkeys(self):
522 return self._list.__iter__()
523 return self._list.__iter__()
523 def iteritems(self):
524 def iteritems(self):
524 for k in self._list:
525 for k in self._list:
525 yield k, self[k]
526 yield k, self[k]
526 def insert(self, index, key, val):
527 def insert(self, index, key, val):
527 self._list.insert(index, key)
528 self._list.insert(index, key)
528 dict.__setitem__(self, key, val)
529 dict.__setitem__(self, key, val)
529
530
530 class _lrucachenode(object):
531 class _lrucachenode(object):
531 """A node in a doubly linked list.
532 """A node in a doubly linked list.
532
533
533 Holds a reference to nodes on either side as well as a key-value
534 Holds a reference to nodes on either side as well as a key-value
534 pair for the dictionary entry.
535 pair for the dictionary entry.
535 """
536 """
536 __slots__ = ('next', 'prev', 'key', 'value')
537 __slots__ = ('next', 'prev', 'key', 'value')
537
538
538 def __init__(self):
539 def __init__(self):
539 self.next = None
540 self.next = None
540 self.prev = None
541 self.prev = None
541
542
542 self.key = _notset
543 self.key = _notset
543 self.value = None
544 self.value = None
544
545
545 def markempty(self):
546 def markempty(self):
546 """Mark the node as emptied."""
547 """Mark the node as emptied."""
547 self.key = _notset
548 self.key = _notset
548
549
549 class lrucachedict(object):
550 class lrucachedict(object):
550 """Dict that caches most recent accesses and sets.
551 """Dict that caches most recent accesses and sets.
551
552
552 The dict consists of an actual backing dict - indexed by original
553 The dict consists of an actual backing dict - indexed by original
553 key - and a doubly linked circular list defining the order of entries in
554 key - and a doubly linked circular list defining the order of entries in
554 the cache.
555 the cache.
555
556
556 The head node is the newest entry in the cache. If the cache is full,
557 The head node is the newest entry in the cache. If the cache is full,
557 we recycle head.prev and make it the new head. Cache accesses result in
558 we recycle head.prev and make it the new head. Cache accesses result in
558 the node being moved to before the existing head and being marked as the
559 the node being moved to before the existing head and being marked as the
559 new head node.
560 new head node.
560 """
561 """
561 def __init__(self, max):
562 def __init__(self, max):
562 self._cache = {}
563 self._cache = {}
563
564
564 self._head = head = _lrucachenode()
565 self._head = head = _lrucachenode()
565 head.prev = head
566 head.prev = head
566 head.next = head
567 head.next = head
567 self._size = 1
568 self._size = 1
568 self._capacity = max
569 self._capacity = max
569
570
570 def __len__(self):
571 def __len__(self):
571 return len(self._cache)
572 return len(self._cache)
572
573
573 def __contains__(self, k):
574 def __contains__(self, k):
574 return k in self._cache
575 return k in self._cache
575
576
576 def __iter__(self):
577 def __iter__(self):
577 # We don't have to iterate in cache order, but why not.
578 # We don't have to iterate in cache order, but why not.
578 n = self._head
579 n = self._head
579 for i in range(len(self._cache)):
580 for i in range(len(self._cache)):
580 yield n.key
581 yield n.key
581 n = n.next
582 n = n.next
582
583
583 def __getitem__(self, k):
584 def __getitem__(self, k):
584 node = self._cache[k]
585 node = self._cache[k]
585 self._movetohead(node)
586 self._movetohead(node)
586 return node.value
587 return node.value
587
588
588 def __setitem__(self, k, v):
589 def __setitem__(self, k, v):
589 node = self._cache.get(k)
590 node = self._cache.get(k)
590 # Replace existing value and mark as newest.
591 # Replace existing value and mark as newest.
591 if node is not None:
592 if node is not None:
592 node.value = v
593 node.value = v
593 self._movetohead(node)
594 self._movetohead(node)
594 return
595 return
595
596
596 if self._size < self._capacity:
597 if self._size < self._capacity:
597 node = self._addcapacity()
598 node = self._addcapacity()
598 else:
599 else:
599 # Grab the last/oldest item.
600 # Grab the last/oldest item.
600 node = self._head.prev
601 node = self._head.prev
601
602
602 # At capacity. Kill the old entry.
603 # At capacity. Kill the old entry.
603 if node.key is not _notset:
604 if node.key is not _notset:
604 del self._cache[node.key]
605 del self._cache[node.key]
605
606
606 node.key = k
607 node.key = k
607 node.value = v
608 node.value = v
608 self._cache[k] = node
609 self._cache[k] = node
609 # And mark it as newest entry. No need to adjust order since it
610 # And mark it as newest entry. No need to adjust order since it
610 # is already self._head.prev.
611 # is already self._head.prev.
611 self._head = node
612 self._head = node
612
613
613 def __delitem__(self, k):
614 def __delitem__(self, k):
614 node = self._cache.pop(k)
615 node = self._cache.pop(k)
615 node.markempty()
616 node.markempty()
616
617
617 # Temporarily mark as newest item before re-adjusting head to make
618 # Temporarily mark as newest item before re-adjusting head to make
618 # this node the oldest item.
619 # this node the oldest item.
619 self._movetohead(node)
620 self._movetohead(node)
620 self._head = node.next
621 self._head = node.next
621
622
622 # Additional dict methods.
623 # Additional dict methods.
623
624
624 def get(self, k, default=None):
625 def get(self, k, default=None):
625 try:
626 try:
626 return self._cache[k]
627 return self._cache[k]
627 except KeyError:
628 except KeyError:
628 return default
629 return default
629
630
630 def clear(self):
631 def clear(self):
631 n = self._head
632 n = self._head
632 while n.key is not _notset:
633 while n.key is not _notset:
633 n.markempty()
634 n.markempty()
634 n = n.next
635 n = n.next
635
636
636 self._cache.clear()
637 self._cache.clear()
637
638
638 def copy(self):
639 def copy(self):
639 result = lrucachedict(self._capacity)
640 result = lrucachedict(self._capacity)
640 n = self._head.prev
641 n = self._head.prev
641 # Iterate in oldest-to-newest order, so the copy has the right ordering
642 # Iterate in oldest-to-newest order, so the copy has the right ordering
642 for i in range(len(self._cache)):
643 for i in range(len(self._cache)):
643 result[n.key] = n.value
644 result[n.key] = n.value
644 n = n.prev
645 n = n.prev
645 return result
646 return result
646
647
647 def _movetohead(self, node):
648 def _movetohead(self, node):
648 """Mark a node as the newest, making it the new head.
649 """Mark a node as the newest, making it the new head.
649
650
650 When a node is accessed, it becomes the freshest entry in the LRU
651 When a node is accessed, it becomes the freshest entry in the LRU
651 list, which is denoted by self._head.
652 list, which is denoted by self._head.
652
653
653 Visually, let's make ``N`` the new head node (* denotes head):
654 Visually, let's make ``N`` the new head node (* denotes head):
654
655
655 previous/oldest <-> head <-> next/next newest
656 previous/oldest <-> head <-> next/next newest
656
657
657 ----<->--- A* ---<->-----
658 ----<->--- A* ---<->-----
658 | |
659 | |
659 E <-> D <-> N <-> C <-> B
660 E <-> D <-> N <-> C <-> B
660
661
661 To:
662 To:
662
663
663 ----<->--- N* ---<->-----
664 ----<->--- N* ---<->-----
664 | |
665 | |
665 E <-> D <-> C <-> B <-> A
666 E <-> D <-> C <-> B <-> A
666
667
667 This requires the following moves:
668 This requires the following moves:
668
669
669 C.next = D (node.prev.next = node.next)
670 C.next = D (node.prev.next = node.next)
670 D.prev = C (node.next.prev = node.prev)
671 D.prev = C (node.next.prev = node.prev)
671 E.next = N (head.prev.next = node)
672 E.next = N (head.prev.next = node)
672 N.prev = E (node.prev = head.prev)
673 N.prev = E (node.prev = head.prev)
673 N.next = A (node.next = head)
674 N.next = A (node.next = head)
674 A.prev = N (head.prev = node)
675 A.prev = N (head.prev = node)
675 """
676 """
676 head = self._head
677 head = self._head
677 # C.next = D
678 # C.next = D
678 node.prev.next = node.next
679 node.prev.next = node.next
679 # D.prev = C
680 # D.prev = C
680 node.next.prev = node.prev
681 node.next.prev = node.prev
681 # N.prev = E
682 # N.prev = E
682 node.prev = head.prev
683 node.prev = head.prev
683 # N.next = A
684 # N.next = A
684 # It is tempting to do just "head" here, however if node is
685 # It is tempting to do just "head" here, however if node is
685 # adjacent to head, this will do bad things.
686 # adjacent to head, this will do bad things.
686 node.next = head.prev.next
687 node.next = head.prev.next
687 # E.next = N
688 # E.next = N
688 node.next.prev = node
689 node.next.prev = node
689 # A.prev = N
690 # A.prev = N
690 node.prev.next = node
691 node.prev.next = node
691
692
692 self._head = node
693 self._head = node
693
694
694 def _addcapacity(self):
695 def _addcapacity(self):
695 """Add a node to the circular linked list.
696 """Add a node to the circular linked list.
696
697
697 The new node is inserted before the head node.
698 The new node is inserted before the head node.
698 """
699 """
699 head = self._head
700 head = self._head
700 node = _lrucachenode()
701 node = _lrucachenode()
701 head.prev.next = node
702 head.prev.next = node
702 node.prev = head.prev
703 node.prev = head.prev
703 node.next = head
704 node.next = head
704 head.prev = node
705 head.prev = node
705 self._size += 1
706 self._size += 1
706 return node
707 return node
707
708
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most ~20 entries; on overflow the least recently used
    key is discarded. A single-argument function gets a fast path
    keyed on the bare argument; otherwise the args tuple is the key.
    Arguments must be hashable.
    '''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Cache hit: refresh recency by re-appending below.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
734
735
class propertycache(object):
    """Descriptor that caches a computed attribute on first access.

    The computed value is written into the instance ``__dict__``,
    shadowing the descriptor so later accesses are plain lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
747
748
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # stderr is not captured; perr is always None here.
    pout, perr = p.communicate(s)
    return pout
754
755
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
788
789
# Maps a filter-command prefix to its implementation; commands without
# a recognized prefix fall through to pipefilter (see filter()).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
793
794
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a known prefix ('tempfile:' / 'pipe:'); default to a pipe.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
800
801
def binary(s):
    """return true if a string is binary data

    Heuristic: any embedded NUL byte marks the string as binary; the
    empty string is not binary.
    """
    return bool(s and '\0' in s)
804
805
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Position of the highest set bit; 0 for x == 0.
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump
                # straight to the magnitude of what we just buffered,
                # capped at max.
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    # Flush any remainder smaller than the current threshold.
    if buf:
        yield ''.join(buf)
835
836
# Re-export: much of this module and its callers refer to error.Abort
# as util.Abort.
Abort = error.Abort
837
838
def always(fn):
    """Matcher predicate that accepts every filename."""
    return True
840
841
def never(fn):
    """Matcher predicate that rejects every filename."""
    return False
843
844
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Restore collection only if it was enabled on entry.
            if gcenabled:
                gc.enable()
    return wrapper
865
866
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives (Windows): no relative path exists.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    # Strip the common prefix by comparing from the front (reversed tails).
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
891
892
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
901
902
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
910
911
# Cached path of the 'hg' executable; lazily initialized by hgexecutable().
_hgexecutable = None
912
913
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
935
936
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
940
941
941 def _isstdout(f):
942 def _isstdout(f):
942 fileno = getattr(f, 'fileno', None)
943 fileno = getattr(f, 'fileno', None)
943 return fileno and fileno() == sys.__stdout__.fileno()
944 return fileno and fileno() == sys.__stdout__.fileno()
944
945
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Tee child output line-by-line into the caller's stream.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1003
1004
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Translate to SignatureError only when the TypeError was
            # raised by the call itself (traceback depth 1), not deeper
            # inside func.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1015
1016
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1060
1061
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns (hardlink, num): whether hardlinking remained possible
    throughout, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # Default to hardlinking only when src and dst share a device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset nested progress by files already counted here.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Hardlink failed; degrade to plain copies from here on.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1097
1098
1098 _winreservednames = '''con prn aux nul
1099 _winreservednames = '''con prn aux nul
1099 com1 com2 com3 com4 com5 com6 com7 com8 com9
1100 com1 com2 com3 com4 com5 com6 com7 com8 com9
1100 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1101 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1101 _winreservedchars = ':*?"<>|'
1102 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # '.' and '..' are allowed despite ending in '.'; the substring
        # test ``n not in '..'`` matches exactly those two names.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1148
1149
# Select the OS-level filename validity check: on Windows use the portable
# checkwinfilename above; elsewhere defer to the platform module, which may
# apply OS-specific rules (or none).
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1153
1154
def makelock(info, pathname):
    """Create a lock file at ``pathname`` holding ``info``.

    Preferred form is a symlink whose target is ``info`` (atomic and
    readable without opening the file).  If the platform has no symlinks,
    or symlink creation is unsupported, fall back to exclusively creating
    a regular file containing ``info``.  An existing lock (EEXIST) is
    propagated to the caller.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # somebody else already holds the lock: report it
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall through to the plain-file scheme
    except AttributeError:
        # platform has no os.symlink at all
        pass

    # O_EXCL makes the create-and-write atomic with respect to other lockers
    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1166
1167
def readlock(pathname):
    """Return the info stored in the lock file at ``pathname``.

    Mirrors makelock(): first try to read it as a symlink; if the path is
    not a symlink (EINVAL), symlinks are unsupported (ENOSYS), or the
    platform lacks os.readlink entirely, read it as a regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # not a symlink: fall through and read the file contents
    except AttributeError:
        # no os.readlink on this platform
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        fp.close()
1179
1180
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # Prefer the file descriptor when available; file-like wrappers
    # without fileno() are statted by name instead.
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1186
1187
1187 # File system features
1188 # File system features
1188
1189
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirpart, base = os.path.split(path)
    # Fold the final component's case; if it has no foldable characters
    # (e.g. all digits) we cannot probe, so report case-sensitive.
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True  # no evidence against case sensitivity
    try:
        # If the case-swapped name resolves to the very same file, the
        # filesystem folded the case for us: case-insensitive.
        if os.lstat(os.path.join(dirpart, folded)) == st:
            return False
        return True
    except OSError:
        # swapped name does not exist: case-sensitive
        return True
1211
1212
# Tri-state re2 availability flag:
#   None  -> re2 imported, but not yet verified to work (see _re._checkre2)
#   False -> re2 unavailable or broken; always use the stdlib re module
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1217
1218
class _re(object):
    # Facade over the stdlib re module (imported as remod) that transparently
    # uses the faster google-re2 bindings when they are present and working.
    def _checkre2(self):
        # Resolve the tri-state _re2 flag to a definite True/False.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        # re2 is only usable when no flags beyond IGNORECASE/MULTILINE
        # are requested; those two are expressed inline in the pattern.
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a construct re2 does not support;
                # fall back to the stdlib engine below
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile / util.re.escape
re = _re()
1262
1263
# maps directory path -> {normcased entry name: on-disk entry name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each directory entry's normcased form to its on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # Fix: str.replace returns a new string; the original discarded the
    # result, so '\' was never escaped inside the character classes below
    # and was not treated as a separator on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased part if the entry vanished on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1305
1306
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Probes by creating two hardlinked temp files next to testfile and
    checking that nlinks() sees a link count > 1.  Returns False on any
    failure (probe files pre-exist, cannot be created, or linking fails).
    The probe files are always cleaned up.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1337
1338
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # on platforms without an altsep this evaluates to None (falsy)
    return os.altsep and path.endswith(os.altsep)
1341
1342
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately not considered: this is an explicit
    alternative to a plain "path.split(os.sep)".  Callers wanting a
    canonical result should os.path.normpath() the path first.'''
    return path.split(os.sep)
1349
1350
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere an X display implies one
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1364
1365
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename is
    # within one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: the empty temp file is the copy
                return temp
            # make error messages mention the file being copied
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # copy failed: don't leave a stale temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1403
1404
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record that explicitly instead of raising
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        # If either side has no stat (missing file) the attribute access
        # below raises AttributeError and the files compare unequal.
        try:
            new, prev = self.stat, old.stat
            # if ambiguity between stat of new and old file is
            # avoided, comparision of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (new.st_size == prev.st_size
                    and new.st_ctime == prev.st_ctime
                    and new.st_mtime == prev.st_mtime)
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            # one of the stats is None (missing file): not ambiguous
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
1469
1470
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # writes go to a same-directory temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # Publish the temp copy under the permanent name.  With
        # checkambig, also defeat same-second ctime ambiguity (see
        # filestat.isambig) by bumping mtime when needed.
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # Drop all buffered changes: remove the temp copy, leave the
        # original file untouched.
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, roll back if an exception escaped
        if exctype is not None:
            self.discard()
        else:
            self.close()
1532
1533
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do (and no chmod)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err2:
            # Catch EEXIST to handle races
            if err2.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1560
1561
def readfile(path):
    """Return the entire contents of *path* as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1564
1565
def writefile(path, text):
    """Replace the contents of *path* with the bytes *text*."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1568
1569
def appendfile(path, text):
    """Append the bytes *text* to *path*, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1572
1573
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        # Pre-split chunks larger than 1MB into 256KB pieces so no single
        # huge chunk has to be sliced repeatedly during reads.
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # how many bytes of self._queue[0] have already been consumed
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return what we have (short read)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, terminating the loop
                left -= chunkremaining

        return ''.join(buf)
1653
1654
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never ask for more than the remaining limit
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits to 0 (falsy) without reading
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1674
1675
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local timezone offset in seconds, derived from the difference
    # between the UTC and local interpretations of the same instant
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    offset = utc - local
    return timestamp, offset.days * 86400 + offset.seconds
1687
1688
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch and offset is the timezone's
    number of seconds away from UTC.  In *format*, '%1' expands to the
    signed hour part of the offset, '%2' to the minute part, and '%z'
    to both.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # positive tz means west of UTC, displayed with a '-' sign
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp to the signed 32-bit range so the datetime arithmetic below
    # cannot overflow the representable year span
    d = min(0x7fffffff, max(-0x80000000, t - tz))
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    base = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return base.strftime(format)
1723
1724
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1727
1728
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts '+HHMM'/'-HHMM' numeric offsets and the literal names 'GMT'
    and 'UTC'.  Returns seconds west of UTC (so '+0100' yields -3600),
    or None when the string is not recognized.
    """
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    return None
1738
1739
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps a specificity key ('S', 'M', 'HI', 'd', 'mb', 'yY') to
    a (biased, now) pair of strings used to fill in elements that the
    format does not mention.  NOTE(review): the [] default would fail on
    the defaults[part] lookups below; callers appear to always pass a
    dict — confirm before relying on the default.
    """
    # NOTE: unixtime = localunixtime + offset
    # If the last whitespace-separated token parses as a timezone, record
    # the explicit offset and strip the token from the date text.
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # each part may list alternative strftime codes (e.g. 'mb' covers
        # both %m and %b); any one appearing in format counts as present
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    # timegm() interprets the tuple as UTC, recovering the wall-clock
    # value encoded by the string independent of any timezone
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1768
1769
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    # bias optionally overrides the per-field default strings handed to
    # strdate() for format elements missing from the input
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # both the English keywords and their translations are accepted
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format; the for/else fires Abort only when
        # every format failed to parse the string
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1845
1846
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    # earliest timestamp the (possibly partial) date can refer to: missing
    # fields are biased toward their minimum (January 1st, midnight)
    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    # latest timestamp the date can refer to: missing fields are biased
    # toward their maximum; month lengths 31/30/29 are tried in turn
    # (falling back to 28) to find the month's real last day
    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': everything from N days ago onward
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches the whole span it can refer to
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1921
1922
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    An 're:' pattern that fails to compile raises error.ParseError.
    """
    prefix, sep, rest = pattern.partition(':')
    if sep and prefix == 're':
        try:
            regex = remod.compile(rest)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', rest, regex.search
    if sep and prefix == 'literal':
        return 'literal', rest, rest.__eq__
    # unknown (or no) prefix: the whole string is a literal pattern
    return 'literal', pattern, pattern.__eq__
1960
1961
def shortuser(user):
    """Return a short representation of a user name or email address.

    Strips any '@domain' suffix, then any 'Real Name <' prefix, then
    truncates at the first space or dot.
    """
    at = user.find('@')
    if at != -1:
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos != -1:
            user = user[:pos]
    return user
1976
1977
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at != -1:
        # drop the domain part
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        # drop any leading 'Real Name <'
        user = user[lt + 1:]
    return user
1986
1987
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; if either bracket is absent the slice
    # degenerates gracefully (find() == -1 makes the start 0, a missing
    # '>' leaves the end open)
    close = author.find('>')
    stop = None if close == -1 else close
    return author[author.find('<') + 1:stop]
1993
1994
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display,
    appending '...' when it is shortened (delegates to encoding.trim)."""
    return encoding.trim(text, maxlength, ellipsis='...')
1997
1998
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable rows are (multiplier, divisor, format) triples tried in
    order; the first row whose threshold (divisor * multiplier) the
    count reaches is used.  The last row's format is the fallback.
    '''
    def format_count(count):
        for threshold_mult, divisor, fmt in unittable:
            if count >= divisor * threshold_mult:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the smallest unit's format
        return unittable[-1][2] % count
    return format_count
2008
2009
# Render a byte quantity with a human-readable unit suffix.  Each unit
# carries three (multiplier, divisor, format) rows so the output keeps
# roughly three significant digits (e.g. '1.23 GB', '12.3 GB', '123 GB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2021
2022
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed, so Windows
    paths are shown with single separators."""
    escaped = repr(s)
    return escaped.replace('\\\\', '\\')
2025
2026
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Construct a width-aware textwrap.TextWrapper instance.

    On first call this defines the wrapper class and rebinds the
    module-level name ``MBTextWrapper`` to it, so subsequent calls
    instantiate the class directly without redefining it.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the first piece occupies at most space_left
            # display columns (measured by encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class: cut an over-long word by display
        # columns rather than by character count
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Cache the class on the module-level name: future MBTextWrapper(...)
    # calls go straight to tw's constructor.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2129
2130
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns, prefixing the
    first line with ``initindent`` and continuation lines with
    ``hangindent``.

    Input and result are byte strings in the local encoding; wrapping is
    done on unicode so multi-column characters are measured correctly.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    uline = decode(line)
    uinit = decode(initindent)
    uhang = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
2142
2143
def iterlines(iterator):
    """Yield the individual text lines of every chunk in *iterator*."""
    for block in iterator:
        for text_line in block.splitlines():
            yield text_line
2147
2148
def expandpath(path):
    """Expand environment variables and '~' constructs in *path*."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2150
2151
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2165
2166
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status) so the polling
        # loop below can tell "exited" apart from "still starting"
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD is looked up via getattr because it is absent on some
    # platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after detecting termination: the child
            # may have satisfied the condition just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD handler on every exit path
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2200
2201
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # drop the regex-escaping backslash, if present, so that a doubled
        # prefix maps back to the bare prefix character
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2225
2226
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2242
2243
# accepted boolean spellings (lower-cased), mapped to their value
_booleans = dict([(spelling, True) for spelling in
                  ('1', 'yes', 'true', 'on', 'always')] +
                 [(spelling, False) for spelling in
                  ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2253
2254
2254 _hexdig = '0123456789ABCDEFabcdef'
2255 _hexdig = '0123456789ABCDEFabcdef'
2255 _hextochr = dict((a + b, chr(int(a + b, 16)))
2256 _hextochr = dict((a + b, chr(int(a + b, 16)))
2256 for a in _hexdig for b in _hexdig)
2257 for a in _hexdig for b in _hexdig)
2257
2258
2258 def _urlunquote(s):
2259 def _urlunquote(s):
2259 """Decode HTTP/HTML % encoding.
2260 """Decode HTTP/HTML % encoding.
2260
2261
2261 >>> _urlunquote('abc%20def')
2262 >>> _urlunquote('abc%20def')
2262 'abc def'
2263 'abc def'
2263 """
2264 """
2264 res = s.split('%')
2265 res = s.split('%')
2265 # fastpath
2266 # fastpath
2266 if len(res) == 1:
2267 if len(res) == 1:
2267 return s
2268 return s
2268 s = res[0]
2269 s = res[0]
2269 for item in res[1:]:
2270 for item in res[1:]:
2270 try:
2271 try:
2271 s += _hextochr[item[:2]] + item[2:]
2272 s += _hextochr[item[:2]] + item[2:]
2272 except KeyError:
2273 except KeyError:
2273 s += '%' + item
2274 s += '%' + item
2274 except UnicodeDecodeError:
2275 except UnicodeDecodeError:
2275 s += unichr(int(item[:2], 16)) + item[2:]
2276 s += unichr(int(item[:2], 16)) + item[2:]
2276 return s
2277 return s
2277
2278
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        # fragment is split off first, so a '#' never ends up in query/path
        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so '@' may appear inside the credentials part
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # only components that were actually parsed (non-None) are shown
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim, not quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # temporarily blank the credentials so str(self) yields a
        # credential-free URI, then restore them
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2564
2565
def hasscheme(path):
    """Report whether path carries a URL scheme component."""
    parsed = url(path)
    return bool(parsed.scheme)
2567
2568
def hasdriveletter(path):
    """Report whether path begins with a Windows drive letter ('X:...').

    An empty/None path is returned unchanged (falsy), matching the
    short-circuit behavior of the original boolean expression.
    """
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2570
2571
def urllocalpath(path):
    """Return the local filesystem path for path, keeping '?'/'#' literal."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2573
2574
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2580
2581
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2586
2587
def isatty(fp):
    """Return fp.isatty(), or False when fp does not support the check."""
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2592
2593
# format table for human-readable elapsed-time display, consumed by
# unitcountfn (defined earlier in this module); each entry is a
# (threshold, divisor, format) triple covering s/ms/us/ns units
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# current indentation depth for nested @timed reports; a one-element
# list so the closure in timed() can mutate it in place
_timenesting = [0]
2610
2611
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''
    indent = 2

    def wrapper(*args, **kwargs):
        start = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2635
2636
# suffix -> byte multiplier; 'b' must stay last so longer suffixes
# ending in 'b' ('kb', 'mb', 'gb') are tried first
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2657
2658
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # order by source name so invocation order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2675
2676
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append((fileline % (fn, ln), func))
    if not entries:
        return
    # width of the widest location string, used to align the columns
    fnmax = max(len(location) for location, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
2697
2698
2698 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2699 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2699 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2700 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2700 Skips the 'skip' last entries. By default it will flush stdout first.
2701 Skips the 'skip' last entries. By default it will flush stdout first.
2701 It can be used everywhere and intentionally does not require an ui object.
2702 It can be used everywhere and intentionally does not require an ui object.
2702 Not be used in production code but very convenient while developing.
2703 Not be used in production code but very convenient while developing.
2703 '''
2704 '''
2704 if otherf:
2705 if otherf:
2705 otherf.flush()
2706 otherf.flush()
2706 f.write('%s at:\n' % msg)
2707 f.write('%s at:\n' % msg)
2707 for line in getstackframes(skip + 1):
2708 for line in getstackframes(skip + 1):
2708 f.write(line)
2709 f.write(line)
2709 f.flush()
2710 f.flush()
2710
2711
2711 class dirs(object):
2712 class dirs(object):
2712 '''a multiset of directory names from a dirstate or manifest'''
2713 '''a multiset of directory names from a dirstate or manifest'''
2713
2714
2714 def __init__(self, map, skip=None):
2715 def __init__(self, map, skip=None):
2715 self._dirs = {}
2716 self._dirs = {}
2716 addpath = self.addpath
2717 addpath = self.addpath
2717 if safehasattr(map, 'iteritems') and skip is not None:
2718 if safehasattr(map, 'iteritems') and skip is not None:
2718 for f, s in map.iteritems():
2719 for f, s in map.iteritems():
2719 if s[0] != skip:
2720 if s[0] != skip:
2720 addpath(f)
2721 addpath(f)
2721 else:
2722 else:
2722 for f in map:
2723 for f in map:
2723 addpath(f)
2724 addpath(f)
2724
2725
2725 def addpath(self, path):
2726 def addpath(self, path):
2726 dirs = self._dirs
2727 dirs = self._dirs
2727 for base in finddirs(path):
2728 for base in finddirs(path):
2728 if base in dirs:
2729 if base in dirs:
2729 dirs[base] += 1
2730 dirs[base] += 1
2730 return
2731 return
2731 dirs[base] = 1
2732 dirs[base] = 1
2732
2733
2733 def delpath(self, path):
2734 def delpath(self, path):
2734 dirs = self._dirs
2735 dirs = self._dirs
2735 for base in finddirs(path):
2736 for base in finddirs(path):
2736 if dirs[base] > 1:
2737 if dirs[base] > 1:
2737 dirs[base] -= 1
2738 dirs[base] -= 1
2738 return
2739 return
2739 del dirs[base]
2740 del dirs[base]
2740
2741
2741 def __iter__(self):
2742 def __iter__(self):
2742 return self._dirs.iterkeys()
2743 return self._dirs.iterkeys()
2743
2744
2744 def __contains__(self, d):
2745 def __contains__(self, d):
2745 return d in self._dirs
2746 return d in self._dirs
2746
2747
2747 if safehasattr(parsers, 'dirs'):
2748 if safehasattr(parsers, 'dirs'):
2748 dirs = parsers.dirs
2749 dirs = parsers.dirs
2749
2750
2750 def finddirs(path):
2751 def finddirs(path):
2751 pos = path.rfind('/')
2752 pos = path.rfind('/')
2752 while pos != -1:
2753 while pos != -1:
2753 yield path[:pos]
2754 yield path[:pos]
2754 pos = path.rfind('/', 0, pos)
2755 pos = path.rfind('/', 0, pos)
2755
2756
2756 # compression utility
2757 # compression utility
2757
2758
2758 class nocompress(object):
2759 class nocompress(object):
2759 def compress(self, x):
2760 def compress(self, x):
2760 return x
2761 return x
2761 def flush(self):
2762 def flush(self):
2762 return ""
2763 return ""
2763
2764
2764 compressors = {
2765 compressors = {
2765 None: nocompress,
2766 None: nocompress,
2766 # lambda to prevent early import
2767 # lambda to prevent early import
2767 'BZ': lambda: bz2.BZ2Compressor(),
2768 'BZ': lambda: bz2.BZ2Compressor(),
2768 'GZ': lambda: zlib.compressobj(),
2769 'GZ': lambda: zlib.compressobj(),
2769 }
2770 }
2770 # also support the old form by courtesies
2771 # also support the old form by courtesies
2771 compressors['UN'] = compressors[None]
2772 compressors['UN'] = compressors[None]
2772
2773
2773 def _makedecompressor(decompcls):
2774 def _makedecompressor(decompcls):
2774 def generator(f):
2775 def generator(f):
2775 d = decompcls()
2776 d = decompcls()
2776 for chunk in filechunkiter(f):
2777 for chunk in filechunkiter(f):
2777 yield d.decompress(chunk)
2778 yield d.decompress(chunk)
2778 def func(fh):
2779 def func(fh):
2779 return chunkbuffer(generator(fh))
2780 return chunkbuffer(generator(fh))
2780 return func
2781 return func
2781
2782
2782 class ctxmanager(object):
2783 class ctxmanager(object):
2783 '''A context manager for use in 'with' blocks to allow multiple
2784 '''A context manager for use in 'with' blocks to allow multiple
2784 contexts to be entered at once. This is both safer and more
2785 contexts to be entered at once. This is both safer and more
2785 flexible than contextlib.nested.
2786 flexible than contextlib.nested.
2786
2787
2787 Once Mercurial supports Python 2.7+, this will become mostly
2788 Once Mercurial supports Python 2.7+, this will become mostly
2788 unnecessary.
2789 unnecessary.
2789 '''
2790 '''
2790
2791
2791 def __init__(self, *args):
2792 def __init__(self, *args):
2792 '''Accepts a list of no-argument functions that return context
2793 '''Accepts a list of no-argument functions that return context
2793 managers. These will be invoked at __call__ time.'''
2794 managers. These will be invoked at __call__ time.'''
2794 self._pending = args
2795 self._pending = args
2795 self._atexit = []
2796 self._atexit = []
2796
2797
2797 def __enter__(self):
2798 def __enter__(self):
2798 return self
2799 return self
2799
2800
2800 def enter(self):
2801 def enter(self):
2801 '''Create and enter context managers in the order in which they were
2802 '''Create and enter context managers in the order in which they were
2802 passed to the constructor.'''
2803 passed to the constructor.'''
2803 values = []
2804 values = []
2804 for func in self._pending:
2805 for func in self._pending:
2805 obj = func()
2806 obj = func()
2806 values.append(obj.__enter__())
2807 values.append(obj.__enter__())
2807 self._atexit.append(obj.__exit__)
2808 self._atexit.append(obj.__exit__)
2808 del self._pending
2809 del self._pending
2809 return values
2810 return values
2810
2811
2811 def atexit(self, func, *args, **kwargs):
2812 def atexit(self, func, *args, **kwargs):
2812 '''Add a function to call when this context manager exits. The
2813 '''Add a function to call when this context manager exits. The
2813 ordering of multiple atexit calls is unspecified, save that
2814 ordering of multiple atexit calls is unspecified, save that
2814 they will happen before any __exit__ functions.'''
2815 they will happen before any __exit__ functions.'''
2815 def wrapper(exc_type, exc_val, exc_tb):
2816 def wrapper(exc_type, exc_val, exc_tb):
2816 func(*args, **kwargs)
2817 func(*args, **kwargs)
2817 self._atexit.append(wrapper)
2818 self._atexit.append(wrapper)
2818 return func
2819 return func
2819
2820
2820 def __exit__(self, exc_type, exc_val, exc_tb):
2821 def __exit__(self, exc_type, exc_val, exc_tb):
2821 '''Context managers are exited in the reverse order from which
2822 '''Context managers are exited in the reverse order from which
2822 they were created.'''
2823 they were created.'''
2823 received = exc_type is not None
2824 received = exc_type is not None
2824 suppressed = False
2825 suppressed = False
2825 pending = None
2826 pending = None
2826 self._atexit.reverse()
2827 self._atexit.reverse()
2827 for exitfunc in self._atexit:
2828 for exitfunc in self._atexit:
2828 try:
2829 try:
2829 if exitfunc(exc_type, exc_val, exc_tb):
2830 if exitfunc(exc_type, exc_val, exc_tb):
2830 suppressed = True
2831 suppressed = True
2831 exc_type = None
2832 exc_type = None
2832 exc_val = None
2833 exc_val = None
2833 exc_tb = None
2834 exc_tb = None
2834 except BaseException:
2835 except BaseException:
2835 pending = sys.exc_info()
2836 pending = sys.exc_info()
2836 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2837 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2837 del self._atexit
2838 del self._atexit
2838 if pending:
2839 if pending:
2839 raise exc_val
2840 raise exc_val
2840 return received and suppressed
2841 return received and suppressed
2841
2842
2842 def _bz2():
2843 def _bz2():
2843 d = bz2.BZ2Decompressor()
2844 d = bz2.BZ2Decompressor()
2844 # Bzip2 stream start with BZ, but we stripped it.
2845 # Bzip2 stream start with BZ, but we stripped it.
2845 # we put it back for good measure.
2846 # we put it back for good measure.
2846 d.decompress('BZ')
2847 d.decompress('BZ')
2847 return d
2848 return d
2848
2849
2849 decompressors = {None: lambda fh: fh,
2850 decompressors = {None: lambda fh: fh,
2850 '_truncatedBZ': _makedecompressor(_bz2),
2851 '_truncatedBZ': _makedecompressor(_bz2),
2851 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2852 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2852 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2853 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2853 }
2854 }
2854 # also support the old form by courtesies
2855 # also support the old form by courtesies
2855 decompressors['UN'] = decompressors[None]
2856 decompressors['UN'] = decompressors[None]
2856
2857
2857 # convenient shortcut
2858 # convenient shortcut
2858 dst = debugstacktrace
2859 dst = debugstacktrace
@@ -1,55 +1,55 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 from __future__ import absolute_import
3 from __future__ import absolute_import
4
4
5 """
5 """
6 Small and dumb HTTP server for use in tests.
6 Small and dumb HTTP server for use in tests.
7 """
7 """
8
8
9 import BaseHTTPServer
10 import SimpleHTTPServer
11 import optparse
9 import optparse
12 import signal
10 import signal
13 import sys
11 import sys
14
12
15 from mercurial import (
13 from mercurial import (
16 cmdutil,
14 cmdutil,
15 util,
17 )
16 )
18
17
18 httpserver = util.httpserver
19 OptionParser = optparse.OptionParser
19 OptionParser = optparse.OptionParser
20
20
21 class simplehttpservice(object):
21 class simplehttpservice(object):
22 def __init__(self, host, port):
22 def __init__(self, host, port):
23 self.address = (host, port)
23 self.address = (host, port)
24 def init(self):
24 def init(self):
25 self.httpd = BaseHTTPServer.HTTPServer(
25 self.httpd = httpserver.httpserver(
26 self.address, SimpleHTTPServer.SimpleHTTPRequestHandler)
26 self.address, httpserver.simplehttprequesthandler)
27 def run(self):
27 def run(self):
28 self.httpd.serve_forever()
28 self.httpd.serve_forever()
29
29
30 if __name__ == '__main__':
30 if __name__ == '__main__':
31 parser = OptionParser()
31 parser = OptionParser()
32 parser.add_option('-p', '--port', dest='port', type='int', default=8000,
32 parser.add_option('-p', '--port', dest='port', type='int', default=8000,
33 help='TCP port to listen on', metavar='PORT')
33 help='TCP port to listen on', metavar='PORT')
34 parser.add_option('-H', '--host', dest='host', default='localhost',
34 parser.add_option('-H', '--host', dest='host', default='localhost',
35 help='hostname or IP to listen on', metavar='HOST')
35 help='hostname or IP to listen on', metavar='HOST')
36 parser.add_option('--pid', dest='pid',
36 parser.add_option('--pid', dest='pid',
37 help='file name where the PID of the server is stored')
37 help='file name where the PID of the server is stored')
38 parser.add_option('-f', '--foreground', dest='foreground',
38 parser.add_option('-f', '--foreground', dest='foreground',
39 action='store_true',
39 action='store_true',
40 help='do not start the HTTP server in the background')
40 help='do not start the HTTP server in the background')
41 parser.add_option('--daemon-postexec', action='append')
41 parser.add_option('--daemon-postexec', action='append')
42
42
43 (options, args) = parser.parse_args()
43 (options, args) = parser.parse_args()
44
44
45 signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
45 signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
46
46
47 if options.foreground and options.pid:
47 if options.foreground and options.pid:
48 parser.error("options --pid and --foreground are mutually exclusive")
48 parser.error("options --pid and --foreground are mutually exclusive")
49
49
50 opts = {'pid_file': options.pid,
50 opts = {'pid_file': options.pid,
51 'daemon': not options.foreground,
51 'daemon': not options.foreground,
52 'daemon_postexec': options.daemon_postexec}
52 'daemon_postexec': options.daemon_postexec}
53 service = simplehttpservice(options.host, options.port)
53 service = simplehttpservice(options.host, options.port)
54 cmdutil.service(opts, initfn=service.init, runfn=service.run,
54 cmdutil.service(opts, initfn=service.init, runfn=service.run,
55 runargs=[sys.executable, __file__] + sys.argv[1:])
55 runargs=[sys.executable, __file__] + sys.argv[1:])
@@ -1,175 +1,175 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ cd "$TESTDIR"/..
4 $ cd "$TESTDIR"/..
5
5
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
12 setup.py not using absolute_import
12 setup.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
14
14
15 #if py3exe
15 #if py3exe
16 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
16 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
17 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
17 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
18 hgext/acl.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
18 hgext/acl.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
19 hgext/automv.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
19 hgext/automv.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
20 hgext/blackbox.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
20 hgext/blackbox.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
21 hgext/bugzilla.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
21 hgext/bugzilla.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
22 hgext/censor.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
22 hgext/censor.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
23 hgext/chgserver.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
23 hgext/chgserver.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
24 hgext/children.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
24 hgext/children.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
25 hgext/churn.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
25 hgext/churn.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
26 hgext/clonebundles.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
26 hgext/clonebundles.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
27 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
27 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
28 hgext/convert/bzr.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
28 hgext/convert/bzr.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
29 hgext/convert/common.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
29 hgext/convert/common.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
30 hgext/convert/convcmd.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
30 hgext/convert/convcmd.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
31 hgext/convert/cvs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
31 hgext/convert/cvs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
32 hgext/convert/cvsps.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
32 hgext/convert/cvsps.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
33 hgext/convert/darcs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
33 hgext/convert/darcs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
34 hgext/convert/filemap.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
34 hgext/convert/filemap.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
35 hgext/convert/git.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
35 hgext/convert/git.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
36 hgext/convert/gnuarch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
36 hgext/convert/gnuarch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
37 hgext/convert/hg.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
37 hgext/convert/hg.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
38 hgext/convert/monotone.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
38 hgext/convert/monotone.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
39 hgext/convert/p4.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
39 hgext/convert/p4.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
40 hgext/convert/subversion.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
40 hgext/convert/subversion.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
41 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
41 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
42 hgext/eol.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
42 hgext/eol.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
43 hgext/extdiff.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
43 hgext/extdiff.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
44 hgext/factotum.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
44 hgext/factotum.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
45 hgext/fetch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
45 hgext/fetch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
46 hgext/fsmonitor/state.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
46 hgext/fsmonitor/state.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
47 hgext/fsmonitor/watchmanclient.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
47 hgext/fsmonitor/watchmanclient.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
48 hgext/gpg.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
48 hgext/gpg.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
49 hgext/graphlog.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
49 hgext/graphlog.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
50 hgext/hgk.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
50 hgext/hgk.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
51 hgext/highlight/highlight.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
51 hgext/highlight/highlight.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
52 hgext/histedit.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
52 hgext/histedit.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
53 hgext/journal.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
53 hgext/journal.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
54 hgext/keyword.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
54 hgext/keyword.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
55 hgext/largefiles/basestore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
55 hgext/largefiles/basestore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
56 hgext/largefiles/lfcommands.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
56 hgext/largefiles/lfcommands.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
57 hgext/largefiles/lfutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
57 hgext/largefiles/lfutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
58 hgext/largefiles/localstore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
58 hgext/largefiles/localstore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
59 hgext/largefiles/overrides.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
59 hgext/largefiles/overrides.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
60 hgext/largefiles/proto.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
60 hgext/largefiles/proto.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
61 hgext/largefiles/remotestore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
61 hgext/largefiles/remotestore.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
62 hgext/largefiles/reposetup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
62 hgext/largefiles/reposetup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
63 hgext/largefiles/storefactory.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
63 hgext/largefiles/storefactory.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
64 hgext/largefiles/uisetup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
64 hgext/largefiles/uisetup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
65 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
65 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
66 hgext/mq.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
66 hgext/mq.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
67 hgext/notify.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
67 hgext/notify.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
68 hgext/pager.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
68 hgext/pager.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
69 hgext/patchbomb.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
69 hgext/patchbomb.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
70 hgext/purge.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
70 hgext/purge.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
71 hgext/rebase.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
71 hgext/rebase.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
72 hgext/record.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
72 hgext/record.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
73 hgext/relink.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
73 hgext/relink.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
74 hgext/schemes.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
74 hgext/schemes.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
75 hgext/share.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
75 hgext/share.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
76 hgext/shelve.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
76 hgext/shelve.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
77 hgext/strip.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
77 hgext/strip.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
78 hgext/transplant.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
78 hgext/transplant.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
79 hgext/win32mbcs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
79 hgext/win32mbcs.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
80 hgext/win32text.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
80 hgext/win32text.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
81 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
81 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
82 mercurial/bookmarks.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
82 mercurial/bookmarks.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
83 mercurial/branchmap.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
83 mercurial/branchmap.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
84 mercurial/bundle2.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
84 mercurial/bundle2.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
85 mercurial/bundlerepo.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
85 mercurial/bundlerepo.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
86 mercurial/byterange.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
86 mercurial/byterange.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
87 mercurial/changegroup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
87 mercurial/changegroup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
88 mercurial/changelog.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
88 mercurial/changelog.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
91 mercurial/commandserver.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
91 mercurial/commandserver.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
92 mercurial/config.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
92 mercurial/config.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
93 mercurial/context.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
93 mercurial/context.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
94 mercurial/copies.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
94 mercurial/copies.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
95 mercurial/crecord.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
95 mercurial/crecord.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
96 mercurial/dagparser.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
96 mercurial/dagparser.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
97 mercurial/dagutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
97 mercurial/dagutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
98 mercurial/destutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
98 mercurial/destutil.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
99 mercurial/dirstate.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
99 mercurial/dirstate.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
100 mercurial/discovery.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
100 mercurial/discovery.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
101 mercurial/dispatch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
101 mercurial/dispatch.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
102 mercurial/exchange.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
102 mercurial/exchange.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
103 mercurial/extensions.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
103 mercurial/extensions.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
104 mercurial/fancyopts.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
104 mercurial/fancyopts.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
105 mercurial/filelog.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
105 mercurial/filelog.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
106 mercurial/filemerge.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
106 mercurial/filemerge.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
107 mercurial/fileset.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
107 mercurial/fileset.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
108 mercurial/formatter.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
108 mercurial/formatter.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
109 mercurial/graphmod.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
109 mercurial/graphmod.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
110 mercurial/hbisect.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
110 mercurial/hbisect.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
111 mercurial/help.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
111 mercurial/help.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
112 mercurial/hg.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
112 mercurial/hg.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
113 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
113 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
116 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
116 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
117 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
117 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
118 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
118 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
119 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
119 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
120 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
120 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
121 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
121 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
122 mercurial/hook.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
122 mercurial/hook.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
123 mercurial/httpconnection.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
123 mercurial/httpconnection.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
124 mercurial/httppeer.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
124 mercurial/httppeer.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
125 mercurial/keepalive.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
125 mercurial/keepalive.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
126 mercurial/localrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
126 mercurial/localrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
127 mercurial/lock.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
127 mercurial/lock.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
128 mercurial/mail.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
128 mercurial/mail.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
129 mercurial/manifest.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
129 mercurial/manifest.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
130 mercurial/match.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
130 mercurial/match.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
131 mercurial/mdiff.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
131 mercurial/mdiff.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
132 mercurial/merge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
132 mercurial/merge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
133 mercurial/minirst.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
133 mercurial/minirst.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
134 mercurial/namespaces.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
134 mercurial/namespaces.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
135 mercurial/obsolete.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
135 mercurial/obsolete.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
136 mercurial/patch.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
136 mercurial/patch.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
137 mercurial/pathutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
137 mercurial/pathutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
138 mercurial/peer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
138 mercurial/peer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
139 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
139 mercurial/pure/mpatch.py: error importing module: <AttributeError> 'VendorImporter' object has no attribute 'find_spec' (line *) (glob)
140 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
140 mercurial/pure/parsers.py: error importing module: <AttributeError> 'VendorImporter' object has no attribute 'find_spec' (line *) (glob)
141 mercurial/pushkey.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
141 mercurial/pushkey.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
142 mercurial/pvec.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
142 mercurial/pvec.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
143 mercurial/registrar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
143 mercurial/registrar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
144 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
144 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
145 mercurial/repoview.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
145 mercurial/repoview.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
146 mercurial/revlog.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
146 mercurial/revlog.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
147 mercurial/revset.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
147 mercurial/revset.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
148 mercurial/scmposix.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
148 mercurial/scmposix.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
149 mercurial/scmutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
149 mercurial/scmutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
150 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
150 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
151 mercurial/similar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
151 mercurial/similar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
152 mercurial/simplemerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
152 mercurial/simplemerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
153 mercurial/sshpeer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
153 mercurial/sshpeer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
154 mercurial/sshserver.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
154 mercurial/sshserver.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
155 mercurial/sslutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
155 mercurial/sslutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
156 mercurial/statichttprepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
156 mercurial/statichttprepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
157 mercurial/store.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
157 mercurial/store.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
158 mercurial/streamclone.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
158 mercurial/streamclone.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
159 mercurial/subrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
159 mercurial/subrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
160 mercurial/tagmerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
160 mercurial/tagmerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
161 mercurial/tags.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
161 mercurial/tags.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
162 mercurial/templatefilters.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
162 mercurial/templatefilters.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
163 mercurial/templatekw.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
163 mercurial/templatekw.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
164 mercurial/templater.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
164 mercurial/templater.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
165 mercurial/transaction.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
165 mercurial/transaction.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
166 mercurial/ui.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
166 mercurial/ui.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
167 mercurial/unionrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
167 mercurial/unionrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
168 mercurial/url.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
168 mercurial/url.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
169 mercurial/util.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
169 mercurial/util.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
170 mercurial/verify.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
170 mercurial/verify.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
171 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
171 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
172 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
172 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
173 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
173 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
174
174
175 #endif
175 #endif
@@ -1,184 +1,184 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 __doc__ = """Tiny HTTP Proxy.
5 __doc__ = """Tiny HTTP Proxy.
6
6
7 This module implements GET, HEAD, POST, PUT and DELETE methods
7 This module implements GET, HEAD, POST, PUT and DELETE methods
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
9 method is also implemented experimentally, but has not been
9 method is also implemented experimentally, but has not been
10 tested yet.
10 tested yet.
11
11
12 Any help will be greatly appreciated. SUZUKI Hisao
12 Any help will be greatly appreciated. SUZUKI Hisao
13 """
13 """
14
14
15 __version__ = "0.2.1"
15 __version__ = "0.2.1"
16
16
17 import BaseHTTPServer
18 import optparse
17 import optparse
19 import os
18 import os
20 import select
19 import select
21 import socket
20 import socket
22 import sys
21 import sys
23
22
24 from mercurial import util
23 from mercurial import util
25
24
25 httpserver = util.httpserver
26 urlparse = util.urlparse
26 urlparse = util.urlparse
27 socketserver = util.socketserver
27 socketserver = util.socketserver
28
28
29 class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
29 class ProxyHandler (httpserver.basehttprequesthandler):
30 __base = BaseHTTPServer.BaseHTTPRequestHandler
30 __base = httpserver.basehttprequesthandler
31 __base_handle = __base.handle
31 __base_handle = __base.handle
32
32
33 server_version = "TinyHTTPProxy/" + __version__
33 server_version = "TinyHTTPProxy/" + __version__
34 rbufsize = 0 # self.rfile Be unbuffered
34 rbufsize = 0 # self.rfile Be unbuffered
35
35
36 def handle(self):
36 def handle(self):
37 (ip, port) = self.client_address
37 (ip, port) = self.client_address
38 allowed = getattr(self, 'allowed_clients', None)
38 allowed = getattr(self, 'allowed_clients', None)
39 if allowed is not None and ip not in allowed:
39 if allowed is not None and ip not in allowed:
40 self.raw_requestline = self.rfile.readline()
40 self.raw_requestline = self.rfile.readline()
41 if self.parse_request():
41 if self.parse_request():
42 self.send_error(403)
42 self.send_error(403)
43 else:
43 else:
44 self.__base_handle()
44 self.__base_handle()
45
45
46 def log_request(self, code='-', size='-'):
46 def log_request(self, code='-', size='-'):
47 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
47 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
48 self.log_message('"%s" %s %s%s',
48 self.log_message('"%s" %s %s%s',
49 self.requestline, str(code), str(size),
49 self.requestline, str(code), str(size),
50 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
50 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
51
51
52 def _connect_to(self, netloc, soc):
52 def _connect_to(self, netloc, soc):
53 i = netloc.find(':')
53 i = netloc.find(':')
54 if i >= 0:
54 if i >= 0:
55 host_port = netloc[:i], int(netloc[i + 1:])
55 host_port = netloc[:i], int(netloc[i + 1:])
56 else:
56 else:
57 host_port = netloc, 80
57 host_port = netloc, 80
58 print("\t" "connect to %s:%d" % host_port)
58 print("\t" "connect to %s:%d" % host_port)
59 try: soc.connect(host_port)
59 try: soc.connect(host_port)
60 except socket.error as arg:
60 except socket.error as arg:
61 try: msg = arg[1]
61 try: msg = arg[1]
62 except (IndexError, TypeError): msg = arg
62 except (IndexError, TypeError): msg = arg
63 self.send_error(404, msg)
63 self.send_error(404, msg)
64 return 0
64 return 0
65 return 1
65 return 1
66
66
67 def do_CONNECT(self):
67 def do_CONNECT(self):
68 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
68 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
69 try:
69 try:
70 if self._connect_to(self.path, soc):
70 if self._connect_to(self.path, soc):
71 self.log_request(200)
71 self.log_request(200)
72 self.wfile.write(self.protocol_version +
72 self.wfile.write(self.protocol_version +
73 " 200 Connection established\r\n")
73 " 200 Connection established\r\n")
74 self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
74 self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
75 self.wfile.write("\r\n")
75 self.wfile.write("\r\n")
76 self._read_write(soc, 300)
76 self._read_write(soc, 300)
77 finally:
77 finally:
78 print("\t" "bye")
78 print("\t" "bye")
79 soc.close()
79 soc.close()
80 self.connection.close()
80 self.connection.close()
81
81
82 def do_GET(self):
82 def do_GET(self):
83 (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
83 (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
84 self.path, 'http')
84 self.path, 'http')
85 if scm != 'http' or fragment or not netloc:
85 if scm != 'http' or fragment or not netloc:
86 self.send_error(400, "bad url %s" % self.path)
86 self.send_error(400, "bad url %s" % self.path)
87 return
87 return
88 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
88 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
89 try:
89 try:
90 if self._connect_to(netloc, soc):
90 if self._connect_to(netloc, soc):
91 self.log_request()
91 self.log_request()
92 soc.send("%s %s %s\r\n" % (
92 soc.send("%s %s %s\r\n" % (
93 self.command,
93 self.command,
94 urlparse.urlunparse(('', '', path, params, query, '')),
94 urlparse.urlunparse(('', '', path, params, query, '')),
95 self.request_version))
95 self.request_version))
96 self.headers['Connection'] = 'close'
96 self.headers['Connection'] = 'close'
97 del self.headers['Proxy-Connection']
97 del self.headers['Proxy-Connection']
98 for key_val in self.headers.items():
98 for key_val in self.headers.items():
99 soc.send("%s: %s\r\n" % key_val)
99 soc.send("%s: %s\r\n" % key_val)
100 soc.send("\r\n")
100 soc.send("\r\n")
101 self._read_write(soc)
101 self._read_write(soc)
102 finally:
102 finally:
103 print("\t" "bye")
103 print("\t" "bye")
104 soc.close()
104 soc.close()
105 self.connection.close()
105 self.connection.close()
106
106
107 def _read_write(self, soc, max_idling=20):
107 def _read_write(self, soc, max_idling=20):
108 iw = [self.connection, soc]
108 iw = [self.connection, soc]
109 ow = []
109 ow = []
110 count = 0
110 count = 0
111 while True:
111 while True:
112 count += 1
112 count += 1
113 (ins, _, exs) = select.select(iw, ow, iw, 3)
113 (ins, _, exs) = select.select(iw, ow, iw, 3)
114 if exs:
114 if exs:
115 break
115 break
116 if ins:
116 if ins:
117 for i in ins:
117 for i in ins:
118 if i is soc:
118 if i is soc:
119 out = self.connection
119 out = self.connection
120 else:
120 else:
121 out = soc
121 out = soc
122 try:
122 try:
123 data = i.recv(8192)
123 data = i.recv(8192)
124 except socket.error:
124 except socket.error:
125 break
125 break
126 if data:
126 if data:
127 out.send(data)
127 out.send(data)
128 count = 0
128 count = 0
129 else:
129 else:
130 print("\t" "idle", count)
130 print("\t" "idle", count)
131 if count == max_idling:
131 if count == max_idling:
132 break
132 break
133
133
134 do_HEAD = do_GET
134 do_HEAD = do_GET
135 do_POST = do_GET
135 do_POST = do_GET
136 do_PUT = do_GET
136 do_PUT = do_GET
137 do_DELETE = do_GET
137 do_DELETE = do_GET
138
138
139 class ThreadingHTTPServer (socketserver.ThreadingMixIn,
139 class ThreadingHTTPServer (socketserver.ThreadingMixIn,
140 BaseHTTPServer.HTTPServer):
140 httpserver.httpserver):
141 def __init__(self, *args, **kwargs):
141 def __init__(self, *args, **kwargs):
142 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
142 httpserver.httpserver.__init__(self, *args, **kwargs)
143 a = open("proxy.pid", "w")
143 a = open("proxy.pid", "w")
144 a.write(str(os.getpid()) + "\n")
144 a.write(str(os.getpid()) + "\n")
145 a.close()
145 a.close()
146
146
147 def runserver(port=8000, bind=""):
147 def runserver(port=8000, bind=""):
148 server_address = (bind, port)
148 server_address = (bind, port)
149 ProxyHandler.protocol_version = "HTTP/1.0"
149 ProxyHandler.protocol_version = "HTTP/1.0"
150 httpd = ThreadingHTTPServer(server_address, ProxyHandler)
150 httpd = ThreadingHTTPServer(server_address, ProxyHandler)
151 sa = httpd.socket.getsockname()
151 sa = httpd.socket.getsockname()
152 print("Serving HTTP on", sa[0], "port", sa[1], "...")
152 print("Serving HTTP on", sa[0], "port", sa[1], "...")
153 try:
153 try:
154 httpd.serve_forever()
154 httpd.serve_forever()
155 except KeyboardInterrupt:
155 except KeyboardInterrupt:
156 print("\nKeyboard interrupt received, exiting.")
156 print("\nKeyboard interrupt received, exiting.")
157 httpd.server_close()
157 httpd.server_close()
158 sys.exit(0)
158 sys.exit(0)
159
159
160 if __name__ == '__main__':
160 if __name__ == '__main__':
161 argv = sys.argv
161 argv = sys.argv
162 if argv[1:] and argv[1] in ('-h', '--help'):
162 if argv[1:] and argv[1] in ('-h', '--help'):
163 print(argv[0], "[port [allowed_client_name ...]]")
163 print(argv[0], "[port [allowed_client_name ...]]")
164 else:
164 else:
165 if argv[2:]:
165 if argv[2:]:
166 allowed = []
166 allowed = []
167 for name in argv[2:]:
167 for name in argv[2:]:
168 client = socket.gethostbyname(name)
168 client = socket.gethostbyname(name)
169 allowed.append(client)
169 allowed.append(client)
170 print("Accept: %s (%s)" % (client, name))
170 print("Accept: %s (%s)" % (client, name))
171 ProxyHandler.allowed_clients = allowed
171 ProxyHandler.allowed_clients = allowed
172 del argv[2:]
172 del argv[2:]
173 else:
173 else:
174 print("Any clients will be served...")
174 print("Any clients will be served...")
175
175
176 parser = optparse.OptionParser()
176 parser = optparse.OptionParser()
177 parser.add_option('-b', '--bind', metavar='ADDRESS',
177 parser.add_option('-b', '--bind', metavar='ADDRESS',
178 help='Specify alternate bind address '
178 help='Specify alternate bind address '
179 '[default: all interfaces]', default='')
179 '[default: all interfaces]', default='')
180 (options, args) = parser.parse_args()
180 (options, args) = parser.parse_args()
181 port = 8000
181 port = 8000
182 if len(args) == 1:
182 if len(args) == 1:
183 port = int(args[0])
183 port = int(args[0])
184 runserver(port, options.bind)
184 runserver(port, options.bind)
General Comments 0
You need to be logged in to leave comments. Login now