@@ -1,34 +1,34 @@
import os, __builtin__
from mercurial import util

def lowerwrap(scope, funcname):
    f = getattr(scope, funcname)
    def wrap(fname, *args, **kwargs):
        d, base = os.path.split(fname)
        try:
            files = os.listdir(d or '.')
-        except OSError
+        except OSError:
            files = []
        if base in files:
            return f(fname, *args, **kwargs)
        for fn in files:
            if fn.lower() == base.lower():
                return f(os.path.join(d, fn), *args, **kwargs)
        return f(fname, *args, **kwargs)
    scope.__dict__[funcname] = wrap

def normcase(path):
    return path.lower()

os.path.normcase = normcase

for f in 'file open'.split():
    lowerwrap(__builtin__, f)

for f in "chmod chown open lstat stat remove unlink".split():
    lowerwrap(os, f)

for f in "exists lexists".split():
    lowerwrap(os.path, f)

lowerwrap(util, 'posixfile')
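The wrappers above only rewrite a call when the exact filename is absent but a case-insensitive match exists in the same directory. A minimal standalone sketch of that lookup, using a hypothetical helper name rather than the extension's own wrap():

    import os

    def findcasematch(fname):
        # return fname unchanged, or the directory entry that matches it
        # case-insensitively when the exact name is not present
        d, base = os.path.split(fname)
        try:
            files = os.listdir(d or '.')
        except OSError:
            return fname
        if base in files:
            return fname
        for fn in files:
            if fn.lower() == base.lower():
                return os.path.join(d, fn)
        return fname

    # e.g. open(findcasematch('readme')) opens 'README' when only the
    # upper-case entry exists in the current directory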
@@ -1,512 +1,511 @@
#!/usr/bin/env python
#
# check-code - a style and portability checker for Mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import re, glob, os, sys
import keyword
import optparse
try:
    import re2
except ImportError:
    re2 = None

def compilere(pat, multiline=False):
    if multiline:
        pat = '(?m)' + pat
    if re2:
        try:
            return re2.compile(pat)
        except re2.error:
            pass
    return re.compile(pat)

def repquote(m):
    t = re.sub(r"\w", "x", m.group('text'))
    t = re.sub(r"[^\s\nx]", "o", t)
    return m.group('quote') + t + m.group('quote')

def reppython(m):
    comment = m.group('comment')
    if comment:
        l = len(comment.rstrip())
        return "#" * l + comment[l:]
    return repquote(m)

def repcomment(m):
    return m.group(1) + "#" * len(m.group(2))

def repccomment(m):
    t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return m.group(1) + t + "*/"

def repcallspaces(m):
    t = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + t

def repinclude(m):
    return m.group(1) + "<foo>"

def rephere(m):
    t = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + t


testpats = [
  [
    (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
    (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
    (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
    (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
    (r'echo -n', "don't use 'echo -n', use printf"),
    (r'(^| )wc[^|]*$\n(?!.*\(re\))', "filter wc output"),
    (r'head -c', "don't use 'head -c', use 'dd'"),
    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
    (r'printf.*\\([1-9]|0\d)', "don't use 'printf \NNN', use Python"),
    (r'printf.*\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
     "use egrep for extended grep syntax"),
    (r'/bin/', "don't use explicit paths for tools"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'export.*=', "don't export and assign at once"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
    (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
    (r'^alias\b.*=', "don't use alias, use a function"),
    (r'if\s*!', "don't use '!' to negate exit status"),
    (r'/dev/u?random', "don't use entropy, use /dev/zero"),
    (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
    (r'^( *)\t', "don't use tabs to indent"),
    (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
     "put a backslash-escaped newline after sed 'i' command"),
  ],
  # warnings
  [
    (r'^function', "don't use 'function', use old style"),
    (r'^diff.*-\w*N', "don't use 'diff -N'"),
    (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
    (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
    (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
  ]
]

testfilters = [
    (r"( *)(#([^\n]*\S)?)", repcomment),
    (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]

winglobmsg = "use (glob) to match Windows paths too"
uprefix = r"^ \$ "
utestpats = [
  [
    (r'^(\S.*|| [$>] .*)[ \t]\n', "trailing whitespace on non-output"),
    (uprefix + r'.*\|\s*sed[^|>\n]*\n',
     "use regex test output patterns instead of sed"),
    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
    (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
    (uprefix + r'.*\|\| echo.*(fail|error)',
     "explicit exit code checks unnecessary"),
    (uprefix + r'set -e', "don't use set -e"),
    (uprefix + r'\s', "don't indent commands, use > for continued lines"),
    (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
    (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
     winglobmsg),
    (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'),
    (r'^ reverting .*/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'),
    (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'),
    (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'),
    (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg,
     '\$TESTTMP/unix-repo$'),
    (r'^ moving \S+/.*[^)]$', winglobmsg),
    (r'^ no changes made to subrepo since.*/.*[^)]$',
     winglobmsg, '\$TESTTMP/unix-repo$'),
    (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$',
     winglobmsg, '\$TESTTMP/unix-repo$'),
  ],
  # warnings
  [
    (r'^ [^*?/\n]* \(glob\)$',
     "warning: glob match with no glob character (?*/)"),
  ]
]

for i in [0, 1]:
    for p, m in testpats[i]:
        if p.startswith(r'^'):
            p = r"^ [$>] (%s)" % p[1:]
        else:
            p = r"^ [$>] .*(%s)" % p
        utestpats[i].append((p, m))

utestfilters = [
    (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
    (r"( *)(#([^\n]*\S)?)", repcomment),
]

pypats = [
  [
    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
     "tuple parameter unpacking not available in Python 3+"),
    (r'lambda\s*\(.*,.*\)',
     "tuple parameter unpacking not available in Python 3+"),
    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
    (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
    (r'^\s*\t', "don't use tabs"),
    (r'\S;\s*\n', "semicolon"),
    (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"),
    (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"),
    (r'(\w|\)),\w', "missing whitespace after ,"),
    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
     r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
     r'((?:\n|\1\s.*\n)+?)\1finally:',
     'no yield inside try/finally in Python 2.4'),
    (r'.{81}', "line too long"),
    (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
#    (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
#     "don't use underbars in identifiers"),
    (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
     "don't use camelcase in identifiers"),
    (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
     "linebreak after :"),
    (r'class\s[^( \n]+:', "old-style class, use class foo(object)"),
    (r'class\s[^( \n]+\(\):',
     "class foo() not available in Python 2.4, use class foo(object)"),
    (r'\b(%s)\(' % '|'.join(keyword.kwlist),
     "Python keyword is not a function"),
    (r',]', "unneeded trailing ',' in list"),
#    (r'class\s[A-Z][^\(]*\((?!Exception)',
#     "don't capitalize non-exception classes"),
#    (r'in range\(', "use xrange"),
#    (r'^\s*print\s+', "avoid using print in core and extensions"),
    (r'[\x80-\xff]', "non-ASCII character literal"),
    (r'("\')\.format\(', "str.format() not available in Python 2.4"),
    (r'^\s*with\s+', "with not available in Python 2.4"),
    (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
    (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
    (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
    (r'(?<!def)\s+(any|all|format)\(',
     "any/all/format not available in Python 2.4"),
    (r'(?<!def)\s+(callable)\(',
     "callable not available in Python 3, use getattr(f, '__call__', None)"),
    (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
     "gratuitous whitespace after Python keyword"),
    (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
#    (r'\s\s=', "gratuitous whitespace before ="),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
     "missing whitespace around operator"),
    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
     "wrong whitespace around ="),
    (r'raise Exception', "don't raise generic exceptions"),
    (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
     "don't use old-style two-argument raise, use Exception(message)"),
    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
    (r' [=!]=\s+(True|False|None)',
     "comparison with singleton, use 'is' or 'is not' instead"),
    (r'^\s*(while|if) [01]:',
     "use True/False for constant Boolean expression"),
    (r'(?:(?<!def)\s+|\()hasattr',
     'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
    (r'opener\([^)]*\).read\(',
     "use opener.read() instead"),
    (r'BaseException', 'not in Python 2.4, use Exception'),
    (r'os\.path\.relpath', 'os.path.relpath is not in Python 2.5'),
    (r'opener\([^)]*\).write\(',
     "use opener.write() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.read\(',
     "use util.readfile() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.write\(',
     "use util.readfile() instead"),
    (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'[\s\(](open|file)\([^)]*\)\.',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'(?i)descendent', "the proper spelling is descendAnt"),
    (r'\.debug\(\_', "don't mark debug messages for translation"),
    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
    (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
    (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
     "missing _() in ui message (use () to hide false-positives)"),
    (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
  ],
  # warnings
  [
  ]
]

pyfilters = [
    (r"""(?msx)(?P<comment>\#.*?$)|
     ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
      (?P<text>(([^\\]|\\.)*?))
      (?P=quote))""", reppython),
]

txtfilters = []

txtpats = [
  [
    ('\s$', 'trailing whitespace'),
  ],
  []
]

cpats = [
  [
    (r'//', "don't use //-style comments"),
    (r'^ ', "don't use spaces to indent"),
    (r'\S\t', "don't use tabs except for indent"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
    (r'.{81}', "line too long"),
    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
    (r'return\(', "return is not a function"),
    (r' ;', "no space before ;"),
    (r'\w+\* \w+', "use int *foo, not int* foo"),
    (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
    (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
    (r'^#\s+\w', "use #foo, not # foo"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'^\s*#import\b', "use only #include in standard C code"),
  ],
  # warnings
  []
]

cfilters = [
    (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
    (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
    (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
    (r'(\()([^)]+\))', repcallspaces),
]

inutilpats = [
  [
    (r'\bui\.', "don't use ui in util"),
  ],
  # warnings
  []
]

inrevlogpats = [
  [
    (r'\brepo\.', "don't use repo in revlog"),
  ],
  # warnings
  []
]

checks = [
    ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
    ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
    ('c', r'.*\.c$', cfilters, cpats),
    ('unified test', r'.*\.t$', utestfilters, utestpats),
    ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
     inrevlogpats),
    ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
     inutilpats),
    ('txt', r'.*\.txt$', txtfilters, txtpats),
]

def _preparepats():
    for c in checks:
        failandwarn = c[-1]
        for pats in failandwarn:
            for i, pseq in enumerate(pats):
                # fix-up regexes for multi-line searches
                p = pseq[0]
                # \s doesn't match \n
                p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
                # [^...] doesn't match newline
                p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)

-                #print po, '=>', p
                pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
        filters = c[2]
        for i, flt in enumerate(filters):
            filters[i] = re.compile(flt[0]), flt[1]
_preparepats()

class norepeatlogger(object):
    def __init__(self):
        self._lastseen = None

    def log(self, fname, lineno, line, msg, blame):
        """print error related a to given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        """
        msgid = fname, lineno, line
        if msgid != self._lastseen:
            if blame:
                print "%s:%d (%s):" % (fname, lineno, blame)
            else:
                print "%s:%d:" % (fname, lineno)
            print " > %s" % line
            self._lastseen = msgid
        print " " + msg

_defaultlogger = norepeatlogger()

def getblame(f):
    lines = []
    for l in os.popen('hg annotate -un %s' % f):
        start, line = l.split(':', 1)
        user, rev = start.split()
        lines.append((line[1:-1], user, rev))
    return lines

def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False, lineno=True):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
              logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of error to display before aborting.
             Set to false (default) to report all errors

    return True if no error is found, False otherwise.
    """
    blamecache = None
    result = True
    for name, match, filters, pats in checks:
        if debug:
            print name, f
        fc = 0
        if not re.match(match, f):
            if debug:
                print "Skipping %s for %s it doesn't match %s" % (
                       name, match, f)
            continue
        fp = open(f)
        pre = post = fp.read()
        fp.close()
        if "no-" + "check-code" in pre:
            if debug:
                print "Skipping %s for %s it has no- and check-code" % (
                      name, f)
            break
        for p, r in filters:
            post = re.sub(p, r, post)
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version

        if debug:
            print "Checking %s for %s" % (name, f)

        prelines = None
        errors = []
        for pat in pats:
            if len(pat) == 3:
                p, msg, ignore = pat
            else:
                p, msg = pat
                ignore = None

            pos = 0
            n = 0
            for m in p.finditer(post):
                if prelines is None:
                    prelines = pre.splitlines()
                    postlines = post.splitlines(True)

                start = m.start()
                while n < len(postlines):
                    step = len(postlines[n])
                    if pos + step > start:
                        break
                    pos += step
                    n += 1
                l = prelines[n]

                if "check-code" + "-ignore" in l:
                    if debug:
                        print "Skipping %s for %s:%s (check-code -ignore)" % (
                            name, f, n)
                    continue
                elif ignore and re.search(ignore, l, re.MULTILINE):
                    continue
                bd = ""
                if blame:
                    bd = 'working directory'
                    if not blamecache:
                        blamecache = getblame(f)
                    if n < len(blamecache):
                        bl, bu, br = blamecache[n]
                        if bl == l:
                            bd = '%s@%s' % (bu, br)
                errors.append((f, lineno and n + 1, l, msg, bd))
                result = False

        errors.sort()
        for e in errors:
            logfunc(*e)
            fc += 1
            if maxerr and fc >= maxerr:
                print " (too many errors, giving up)"
                break

    return result

if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")
    parser.add_option("", "--nolineno", action="store_false",
                      dest='lineno', help="don't show line numbers")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
                        lineno=True)
    (options, args) = parser.parse_args()

    if len(args) == 0:
        check = glob.glob("*")
    else:
        check = args

    ret = 0
    for f in check:
        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                         blame=options.blame, debug=options.debug,
                         lineno=options.lineno):
            ret = 1
    sys.exit(ret)
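_preparepats() above rewrites every pattern before compiling it with re.MULTILINE: a bare \s becomes [ \t] so it cannot run across newlines, and negated character classes are widened to exclude \n. A short standalone illustration of those two substitutions applied to the "semicolon" rule (it assumes nothing beyond the two re.sub calls shown above):

    import re

    p = r'\S;\s*\n'                          # the "semicolon" pattern from pypats
    p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)  # \s -> [ \t]
    p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)  # [^...] -> [^\n...  (no-op here)
    print p                                  # \S;[ \t]*\n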
@@ -1,108 +1,107 @@
"Fixer that translates some APIs ignored by the default 2to3 fixers."

# FIXME: This fixer has some ugly hacks. Its main design is based on that of
# fix_imports, from lib2to3. Unfortunately, the fix_imports framework only
# changes module names "without dots", meaning it won't work for some changes
# in the email module/package. Thus this fixer was born. I believe that with a
# bit more thinking, a more generic fixer can be implemented, but I'll leave
# that as future work.

from lib2to3.fixer_util import Name
from lib2to3.fixes import fix_imports

# This maps the old names to the new names. Note that a drawback of the current
# design is that the dictionary keys MUST have EXACTLY one dot (.) in them,
# otherwise things will break. (If you don't need a module hierarchy, you're
# better of just inherit from fix_imports and overriding the MAPPING dict.)

MAPPING = {'email.Utils': 'email.utils',
           'email.Errors': 'email.errors',
           'email.Header': 'email.header',
           'email.Parser': 'email.parser',
           'email.Encoders': 'email.encoders',
           'email.MIMEText': 'email.mime.text',
           'email.MIMEBase': 'email.mime.base',
           'email.Generator': 'email.generator',
           'email.MIMEMultipart': 'email.mime.multipart',
          }

def alternates(members):
    return "(" + "|".join(map(repr, members)) + ")"

def build_pattern(mapping=MAPPING):
    packages = {}
    for key in mapping:
        # What we are doing here is the following: with dotted names, we'll
        # have something like package_name <trailer '.' module>. Then, we are
        # making a dictionary to copy this structure. For example, if
        # mapping={'A.B': 'a.b', 'A.C': 'a.c'}, it will generate the dictionary
        # {'A': ['b', 'c']} to, then, generate something like "A <trailer '.'
        # ('b' | 'c')".
        name = key.split('.')
        prefix = name[0]
        if prefix in packages:
            packages[prefix].append(name[1:][0])
        else:
            packages[prefix] = name[1:]

    mod_list = ' | '.join(["'%s' '.' ('%s')" %
                           (key, "' | '".join(packages[key])) for key in packages])
    mod_list = '(' + mod_list + ' )'
-    bare_names = alternates(mapping.keys())

    yield """name_import=import_name< 'import' module_name=dotted_name< %s > >
          """ % mod_list

    yield """name_import=import_name< 'import'
                        multiple_imports=dotted_as_names< any*
                        module_name=dotted_name< %s >
                        any* >
                        >""" % mod_list

    packs = ' | '.join(["'%s' trailer<'.' ('%s')>" % (key,
                        "' | '".join(packages[key])) for key in packages])

    yield "power< package=(%s) trailer<'.' any > any* >" % packs

class FixLeftoverImports(fix_imports.FixImports):
    # We want to run this fixer after fix_import has run (this shouldn't matter
    # for hg, though, as setup3k prefers to run the default fixers first)
    mapping = MAPPING

    def build_pattern(self):
        return "|".join(build_pattern(self.mapping))

    def transform(self, node, results):
        # Mostly copied from fix_imports.py
        import_mod = results.get("module_name")
        if import_mod:
            try:
                mod_name = import_mod.value
            except AttributeError:
                # XXX: A hack to remove whitespace prefixes and suffixes
                mod_name = str(import_mod).strip()
            new_name = self.mapping[mod_name]
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # marked its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys of
                # MAPPING randomly sprinkled in an import statement.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            # Now this is, mostly, a hack
            bare_name = results["package"][0]
            bare_name_text = ''.join(map(str, results['package'])).strip()
            new_name = self.replace.get(bare_name_text)
            prefix = results['package'][0].prefix
            if new_name:
                bare_name.replace(Name(new_name, prefix=prefix))
                results["package"][1].replace(Name(''))
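build_pattern() above groups the MAPPING keys by their package prefix and then alternates over the grouped submodule names inside the lib2to3 patterns it yields. A cut-down, illustrative run of that grouping with only two of the keys:

    MAPPING = {'email.Utils': 'email.utils',
               'email.MIMEText': 'email.mime.text'}

    packages = {}
    for key in MAPPING:
        name = key.split('.')
        prefix = name[0]
        if prefix in packages:
            packages[prefix].append(name[1:][0])
        else:
            packages[prefix] = name[1:]

    # packages == {'email': ['Utils', 'MIMEText']} (dict order may vary)
    mod_list = ' | '.join(["'%s' '.' ('%s')" %
                           (key, "' | '".join(packages[key]))
                           for key in packages])
    print mod_list   # 'email' '.' ('Utils' | 'MIMEText')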
@@ -1,410 +1,410 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

from mercurial import cmdutil, scmutil, util, commands, obsolete
from mercurial import repoview, branchmap, merge, copies
import time, os, sys

cmdtable = {}
command = cmdutil.command(cmdtable)

def timer(func, title=None):
    results = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    if r:
        sys.stderr.write("! result: %s\n" % r)
    m = min(results)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (m[0], m[1] + m[2], m[1], m[2], count))

@command('perfwalk')
def perfwalk(ui, repo, *pats):
    try:
        m = scmutil.match(repo[None], pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
    except Exception:
        try:
            m = scmutil.match(repo[None], pats, {})
            timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
        except Exception:
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))

@command('perfannotate')
def perfannotate(ui, repo, f):
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))

@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')])
def perfstatus(ui, repo, **opts):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer(lambda: sum(map(len, repo.status(**opts))))

@command('perfaddremove')
def perfaddremove(ui, repo):
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        timer(lambda: scmutil.addremove(repo, dry_run=True))
    finally:
        repo.ui.quiet = oldquiet

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command('perfheads')
def perfheads(ui, repo):
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)

@command('perftags')
def perftags(ui, repo):
    import mercurial.changelog, mercurial.manifest
    def t():
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.manifest = mercurial.manifest.manifest(repo.sopener)
        repo._tags = None
        return len(repo.tags())
    timer(t)

@command('perfancestors')
def perfancestors(ui, repo):
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)

@command('perfancestorset')
def perfancestorset(ui, repo, revset):
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
|
114 | timer(d) | |
115 |
|
115 | |||
116 | @command('perfdirs') |
|
116 | @command('perfdirs') | |
117 | def perfdirs(ui, repo): |
|
117 | def perfdirs(ui, repo): | |
118 | dirstate = repo.dirstate |
|
118 | dirstate = repo.dirstate | |
119 | 'a' in dirstate |
|
119 | 'a' in dirstate | |
120 | def d(): |
|
120 | def d(): | |
121 | dirstate.dirs() |
|
121 | dirstate.dirs() | |
122 | del dirstate._dirs |
|
122 | del dirstate._dirs | |
123 | timer(d) |
|
123 | timer(d) | |
124 |
|
124 | |||
125 | @command('perfdirstate') |
|
125 | @command('perfdirstate') | |
126 | def perfdirstate(ui, repo): |
|
126 | def perfdirstate(ui, repo): | |
127 | "a" in repo.dirstate |
|
127 | "a" in repo.dirstate | |
128 | def d(): |
|
128 | def d(): | |
129 | repo.dirstate.invalidate() |
|
129 | repo.dirstate.invalidate() | |
130 | "a" in repo.dirstate |
|
130 | "a" in repo.dirstate | |
131 | timer(d) |
|
131 | timer(d) | |
132 |
|
132 | |||
133 | @command('perfdirstatedirs') |
|
133 | @command('perfdirstatedirs') | |
134 | def perfdirstatedirs(ui, repo): |
|
134 | def perfdirstatedirs(ui, repo): | |
135 | "a" in repo.dirstate |
|
135 | "a" in repo.dirstate | |
136 | def d(): |
|
136 | def d(): | |
137 | "a" in repo.dirstate._dirs |
|
137 | "a" in repo.dirstate._dirs | |
138 | del repo.dirstate._dirs |
|
138 | del repo.dirstate._dirs | |
139 | timer(d) |
|
139 | timer(d) | |
140 |
|
140 | |||
141 | @command('perfdirstatewrite') |
|
141 | @command('perfdirstatewrite') | |
142 | def perfdirstatewrite(ui, repo): |
|
142 | def perfdirstatewrite(ui, repo): | |
143 | ds = repo.dirstate |
|
143 | ds = repo.dirstate | |
144 | "a" in ds |
|
144 | "a" in ds | |
145 | def d(): |
|
145 | def d(): | |
146 | ds._dirty = True |
|
146 | ds._dirty = True | |
147 | ds.write() |
|
147 | ds.write() | |
148 | timer(d) |
|
148 | timer(d) | |
149 |
|
149 | |||
150 | @command('perfmergecalculate', |
|
150 | @command('perfmergecalculate', | |
151 | [('r', 'rev', '.', 'rev to merge against')]) |
|
151 | [('r', 'rev', '.', 'rev to merge against')]) | |
152 | def perfmergecalculate(ui, repo, rev): |
|
152 | def perfmergecalculate(ui, repo, rev): | |
153 | wctx = repo[None] |
|
153 | wctx = repo[None] | |
154 | rctx = scmutil.revsingle(repo, rev, rev) |
|
154 | rctx = scmutil.revsingle(repo, rev, rev) | |
155 | ancestor = wctx.ancestor(rctx) |
|
155 | ancestor = wctx.ancestor(rctx) | |
156 | # we don't want working dir files to be stat'd in the benchmark, so prime |
|
156 | # we don't want working dir files to be stat'd in the benchmark, so prime | |
157 | # that cache |
|
157 | # that cache | |
158 | wctx.dirty() |
|
158 | wctx.dirty() | |
159 | def d(): |
|
159 | def d(): | |
160 | # acceptremote is True because we don't want prompts in the middle of |
|
160 | # acceptremote is True because we don't want prompts in the middle of | |
161 | # our benchmark |
|
161 | # our benchmark | |
162 | merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False, |
|
162 | merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False, | |
163 | acceptremote=True) |
|
163 | acceptremote=True) | |
164 | timer(d) |
|
164 | timer(d) | |
165 |
|
165 | |||
166 | @command('perfpathcopies', [], "REV REV") |
|
166 | @command('perfpathcopies', [], "REV REV") | |
167 | def perfpathcopies(ui, repo, rev1, rev2): |
|
167 | def perfpathcopies(ui, repo, rev1, rev2): | |
168 | ctx1 = scmutil.revsingle(repo, rev1, rev1) |
|
168 | ctx1 = scmutil.revsingle(repo, rev1, rev1) | |
169 | ctx2 = scmutil.revsingle(repo, rev2, rev2) |
|
169 | ctx2 = scmutil.revsingle(repo, rev2, rev2) | |
170 | def d(): |
|
170 | def d(): | |
171 | copies.pathcopies(ctx1, ctx2) |
|
171 | copies.pathcopies(ctx1, ctx2) | |
172 | timer(d) |
|
172 | timer(d) | |
173 |
|
173 | |||
174 | @command('perfmanifest') |
|
174 | @command('perfmanifest') | |
175 | def perfmanifest(ui, repo): |
|
175 | def perfmanifest(ui, repo): | |
176 | def d(): |
|
176 | def d(): | |
177 | t = repo.manifest.tip() |
|
177 | t = repo.manifest.tip() | |
178 |
|
|
178 | repo.manifest.read(t) | |
179 | repo.manifest.mapcache = None |
|
179 | repo.manifest.mapcache = None | |
180 | repo.manifest._cache = None |
|
180 | repo.manifest._cache = None | |
181 | timer(d) |
|
181 | timer(d) | |
182 |
|
182 | |||
183 | @command('perfchangeset') |
|
183 | @command('perfchangeset') | |
184 | def perfchangeset(ui, repo, rev): |
|
184 | def perfchangeset(ui, repo, rev): | |
185 | n = repo[rev].node() |
|
185 | n = repo[rev].node() | |
186 | def d(): |
|
186 | def d(): | |
187 |
|
|
187 | repo.changelog.read(n) | |
188 | #repo.changelog._cache = None |
|
188 | #repo.changelog._cache = None | |
189 | timer(d) |
|
189 | timer(d) | |
190 |
|
190 | |||
191 | @command('perfindex') |
|
191 | @command('perfindex') | |
192 | def perfindex(ui, repo): |
|
192 | def perfindex(ui, repo): | |
193 | import mercurial.revlog |
|
193 | import mercurial.revlog | |
194 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
|
194 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg | |
195 | n = repo["tip"].node() |
|
195 | n = repo["tip"].node() | |
196 | def d(): |
|
196 | def d(): | |
197 | cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i") |
|
197 | cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i") | |
198 | cl.rev(n) |
|
198 | cl.rev(n) | |
199 | timer(d) |
|
199 | timer(d) | |
200 |
|
200 | |||
201 | @command('perfstartup') |
|
201 | @command('perfstartup') | |
202 | def perfstartup(ui, repo): |
|
202 | def perfstartup(ui, repo): | |
203 | cmd = sys.argv[0] |
|
203 | cmd = sys.argv[0] | |
204 | def d(): |
|
204 | def d(): | |
205 | os.system("HGRCPATH= %s version -q > /dev/null" % cmd) |
|
205 | os.system("HGRCPATH= %s version -q > /dev/null" % cmd) | |
206 | timer(d) |
|
206 | timer(d) | |
207 |
|
207 | |||
208 | @command('perfparents') |
|
208 | @command('perfparents') | |
209 | def perfparents(ui, repo): |
|
209 | def perfparents(ui, repo): | |
210 | nl = [repo.changelog.node(i) for i in xrange(1000)] |
|
210 | nl = [repo.changelog.node(i) for i in xrange(1000)] | |
211 | def d(): |
|
211 | def d(): | |
212 | for n in nl: |
|
212 | for n in nl: | |
213 | repo.changelog.parents(n) |
|
213 | repo.changelog.parents(n) | |
214 | timer(d) |
|
214 | timer(d) | |
215 |
|
215 | |||
216 | @command('perflookup') |
|
216 | @command('perflookup') | |
217 | def perflookup(ui, repo, rev): |
|
217 | def perflookup(ui, repo, rev): | |
218 | timer(lambda: len(repo.lookup(rev))) |
|
218 | timer(lambda: len(repo.lookup(rev))) | |
219 |
|
219 | |||
220 | @command('perfrevrange') |
|
220 | @command('perfrevrange') | |
221 | def perfrevrange(ui, repo, *specs): |
|
221 | def perfrevrange(ui, repo, *specs): | |
222 | revrange = scmutil.revrange |
|
222 | revrange = scmutil.revrange | |
223 | timer(lambda: len(revrange(repo, specs))) |
|
223 | timer(lambda: len(revrange(repo, specs))) | |
224 |
|
224 | |||
225 | @command('perfnodelookup') |
|
225 | @command('perfnodelookup') | |
226 | def perfnodelookup(ui, repo, rev): |
|
226 | def perfnodelookup(ui, repo, rev): | |
227 | import mercurial.revlog |
|
227 | import mercurial.revlog | |
228 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
|
228 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg | |
229 | n = repo[rev].node() |
|
229 | n = repo[rev].node() | |
230 | cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i") |
|
230 | cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i") | |
231 | def d(): |
|
231 | def d(): | |
232 | cl.rev(n) |
|
232 | cl.rev(n) | |
233 | clearcaches(cl) |
|
233 | clearcaches(cl) | |
234 | timer(d) |
|
234 | timer(d) | |
235 |
|
235 | |||
236 | @command('perflog', |
|
236 | @command('perflog', | |
237 | [('', 'rename', False, 'ask log to follow renames')]) |
|
237 | [('', 'rename', False, 'ask log to follow renames')]) | |
238 | def perflog(ui, repo, **opts): |
|
238 | def perflog(ui, repo, **opts): | |
239 | ui.pushbuffer() |
|
239 | ui.pushbuffer() | |
240 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', |
|
240 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', | |
241 | copies=opts.get('rename'))) |
|
241 | copies=opts.get('rename'))) | |
242 | ui.popbuffer() |
|
242 | ui.popbuffer() | |
243 |
|
243 | |||
244 | @command('perftemplating') |
|
244 | @command('perftemplating') | |
245 | def perftemplating(ui, repo): |
|
245 | def perftemplating(ui, repo): | |
246 | ui.pushbuffer() |
|
246 | ui.pushbuffer() | |
247 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', |
|
247 | timer(lambda: commands.log(ui, repo, rev=[], date='', user='', | |
248 | template='{date|shortdate} [{rev}:{node|short}]' |
|
248 | template='{date|shortdate} [{rev}:{node|short}]' | |
249 | ' {author|person}: {desc|firstline}\n')) |
|
249 | ' {author|person}: {desc|firstline}\n')) | |
250 | ui.popbuffer() |
|
250 | ui.popbuffer() | |
251 |
|
251 | |||
252 | @command('perfcca') |
|
252 | @command('perfcca') | |
253 | def perfcca(ui, repo): |
|
253 | def perfcca(ui, repo): | |
254 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) |
|
254 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) | |
255 |
|
255 | |||
256 | @command('perffncacheload') |
|
256 | @command('perffncacheload') | |
257 | def perffncacheload(ui, repo): |
|
257 | def perffncacheload(ui, repo): | |
258 | s = repo.store |
|
258 | s = repo.store | |
259 | def d(): |
|
259 | def d(): | |
260 | s.fncache._load() |
|
260 | s.fncache._load() | |
261 | timer(d) |
|
261 | timer(d) | |
262 |
|
262 | |||
263 | @command('perffncachewrite') |
|
263 | @command('perffncachewrite') | |
264 | def perffncachewrite(ui, repo): |
|
264 | def perffncachewrite(ui, repo): | |
265 | s = repo.store |
|
265 | s = repo.store | |
266 | s.fncache._load() |
|
266 | s.fncache._load() | |
267 | def d(): |
|
267 | def d(): | |
268 | s.fncache._dirty = True |
|
268 | s.fncache._dirty = True | |
269 | s.fncache.write() |
|
269 | s.fncache.write() | |
270 | timer(d) |
|
270 | timer(d) | |
271 |
|
271 | |||
272 | @command('perffncacheencode') |
|
272 | @command('perffncacheencode') | |
273 | def perffncacheencode(ui, repo): |
|
273 | def perffncacheencode(ui, repo): | |
274 | s = repo.store |
|
274 | s = repo.store | |
275 | s.fncache._load() |
|
275 | s.fncache._load() | |
276 | def d(): |
|
276 | def d(): | |
277 | for p in s.fncache.entries: |
|
277 | for p in s.fncache.entries: | |
278 | s.encode(p) |
|
278 | s.encode(p) | |
279 | timer(d) |
|
279 | timer(d) | |
280 |
|
280 | |||
281 | @command('perfdiffwd') |
|
281 | @command('perfdiffwd') | |
282 | def perfdiffwd(ui, repo): |
|
282 | def perfdiffwd(ui, repo): | |
283 | """Profile diff of working directory changes""" |
|
283 | """Profile diff of working directory changes""" | |
284 | options = { |
|
284 | options = { | |
285 | 'w': 'ignore_all_space', |
|
285 | 'w': 'ignore_all_space', | |
286 | 'b': 'ignore_space_change', |
|
286 | 'b': 'ignore_space_change', | |
287 | 'B': 'ignore_blank_lines', |
|
287 | 'B': 'ignore_blank_lines', | |
288 | } |
|
288 | } | |
289 |
|
289 | |||
290 | for diffopt in ('', 'w', 'b', 'B', 'wB'): |
|
290 | for diffopt in ('', 'w', 'b', 'B', 'wB'): | |
291 | opts = dict((options[c], '1') for c in diffopt) |
|
291 | opts = dict((options[c], '1') for c in diffopt) | |
292 | def d(): |
|
292 | def d(): | |
293 | ui.pushbuffer() |
|
293 | ui.pushbuffer() | |
294 | commands.diff(ui, repo, **opts) |
|
294 | commands.diff(ui, repo, **opts) | |
295 | ui.popbuffer() |
|
295 | ui.popbuffer() | |
296 | title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none') |
|
296 | title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none') | |
297 | timer(d, title) |
|
297 | timer(d, title) | |
298 |
|
298 | |||
299 | @command('perfrevlog', |
|
299 | @command('perfrevlog', | |
300 | [('d', 'dist', 100, 'distance between the revisions')], |
|
300 | [('d', 'dist', 100, 'distance between the revisions')], | |
301 | "[INDEXFILE]") |
|
301 | "[INDEXFILE]") | |
302 | def perfrevlog(ui, repo, file_, **opts): |
|
302 | def perfrevlog(ui, repo, file_, **opts): | |
303 | from mercurial import revlog |
|
303 | from mercurial import revlog | |
304 | dist = opts['dist'] |
|
304 | dist = opts['dist'] | |
305 | def d(): |
|
305 | def d(): | |
306 | r = revlog.revlog(lambda fn: open(fn, 'rb'), file_) |
|
306 | r = revlog.revlog(lambda fn: open(fn, 'rb'), file_) | |
307 | for x in xrange(0, len(r), dist): |
|
307 | for x in xrange(0, len(r), dist): | |
308 | r.revision(r.node(x)) |
|
308 | r.revision(r.node(x)) | |
309 |
|
309 | |||
310 | timer(d) |
|
310 | timer(d) | |
311 |
|
311 | |||
312 | @command('perfrevset', |
|
312 | @command('perfrevset', | |
313 | [('C', 'clear', False, 'clear volatile cache between each call.')], |
|
313 | [('C', 'clear', False, 'clear volatile cache between each call.')], | |
314 | "REVSET") |
|
314 | "REVSET") | |
315 | def perfrevset(ui, repo, expr, clear=False): |
|
315 | def perfrevset(ui, repo, expr, clear=False): | |
316 | """benchmark the execution time of a revset |
|
316 | """benchmark the execution time of a revset | |
317 |
|
317 | |||
318 | Use the --clear option if you need to evaluate the impact of building the |
|
318 | Use the --clear option if you need to evaluate the impact of building the | |
319 | volatile revision set caches on revset execution. The volatile caches hold |
|
319 | volatile revision set caches on revset execution. The volatile caches hold | |
320 | filtering- and obsolescence-related data.""" |
|
320 | filtering- and obsolescence-related data.""" | |
321 | def d(): |
|
321 | def d(): | |
322 | if clear: |
|
322 | if clear: | |
323 | repo.invalidatevolatilesets() |
|
323 | repo.invalidatevolatilesets() | |
324 | repo.revs(expr) |
|
324 | repo.revs(expr) | |
325 | timer(d) |
|
325 | timer(d) | |
326 |
|
326 | |||
327 | @command('perfvolatilesets') |
|
327 | @command('perfvolatilesets') | |
328 | def perfvolatilesets(ui, repo, *names): |
|
328 | def perfvolatilesets(ui, repo, *names): | |
329 | """benchmark the computation of various volatile set |
|
329 | """benchmark the computation of various volatile set | |
330 |
|
330 | |||
331 | Volatile sets contain elements related to filtering and obsolescence.""" |
|
331 | Volatile sets contain elements related to filtering and obsolescence.""" | |
332 | repo = repo.unfiltered() |
|
332 | repo = repo.unfiltered() | |
333 |
|
333 | |||
334 | def getobs(name): |
|
334 | def getobs(name): | |
335 | def d(): |
|
335 | def d(): | |
336 | repo.invalidatevolatilesets() |
|
336 | repo.invalidatevolatilesets() | |
337 | obsolete.getrevs(repo, name) |
|
337 | obsolete.getrevs(repo, name) | |
338 | return d |
|
338 | return d | |
339 |
|
339 | |||
340 | allobs = sorted(obsolete.cachefuncs) |
|
340 | allobs = sorted(obsolete.cachefuncs) | |
341 | if names: |
|
341 | if names: | |
342 | allobs = [n for n in allobs if n in names] |
|
342 | allobs = [n for n in allobs if n in names] | |
343 |
|
343 | |||
344 | for name in allobs: |
|
344 | for name in allobs: | |
345 | timer(getobs(name), title=name) |
|
345 | timer(getobs(name), title=name) | |
346 |
|
346 | |||
347 | def getfiltered(name): |
|
347 | def getfiltered(name): | |
348 | def d(): |
|
348 | def d(): | |
349 | repo.invalidatevolatilesets() |
|
349 | repo.invalidatevolatilesets() | |
350 | repoview.filteredrevs(repo, name) |
|
350 | repoview.filteredrevs(repo, name) | |
351 | return d |
|
351 | return d | |
352 |
|
352 | |||
353 | allfilter = sorted(repoview.filtertable) |
|
353 | allfilter = sorted(repoview.filtertable) | |
354 | if names: |
|
354 | if names: | |
355 | allfilter = [n for n in allfilter if n in names] |
|
355 | allfilter = [n for n in allfilter if n in names] | |
356 |
|
356 | |||
357 | for name in allfilter: |
|
357 | for name in allfilter: | |
358 | timer(getfiltered(name), title=name) |
|
358 | timer(getfiltered(name), title=name) | |
359 |
|
359 | |||
360 | @command('perfbranchmap', |
|
360 | @command('perfbranchmap', | |
361 | [('f', 'full', False, |
|
361 | [('f', 'full', False, | |
362 | 'Includes build time of subset'), |
|
362 | 'Includes build time of subset'), | |
363 | ]) |
|
363 | ]) | |
364 | def perfbranchmap(ui, repo, full=False): |
|
364 | def perfbranchmap(ui, repo, full=False): | |
365 | """benchmark the update of a branchmap |
|
365 | """benchmark the update of a branchmap | |
366 |
|
366 | |||
367 | This benchmarks the full repo.branchmap() call with read and write disabled |
|
367 | This benchmarks the full repo.branchmap() call with read and write disabled | |
368 | """ |
|
368 | """ | |
369 | def getbranchmap(filtername): |
|
369 | def getbranchmap(filtername): | |
370 | """generate a benchmark function for the filtername""" |
|
370 | """generate a benchmark function for the filtername""" | |
371 | if filtername is None: |
|
371 | if filtername is None: | |
372 | view = repo |
|
372 | view = repo | |
373 | else: |
|
373 | else: | |
374 | view = repo.filtered(filtername) |
|
374 | view = repo.filtered(filtername) | |
375 | def d(): |
|
375 | def d(): | |
376 | if full: |
|
376 | if full: | |
377 | view._branchcaches.clear() |
|
377 | view._branchcaches.clear() | |
378 | else: |
|
378 | else: | |
379 | view._branchcaches.pop(filtername, None) |
|
379 | view._branchcaches.pop(filtername, None) | |
380 | view.branchmap() |
|
380 | view.branchmap() | |
381 | return d |
|
381 | return d | |
382 | # add filter in smaller subset to bigger subset |
|
382 | # add filter in smaller subset to bigger subset | |
383 | possiblefilters = set(repoview.filtertable) |
|
383 | possiblefilters = set(repoview.filtertable) | |
384 | allfilters = [] |
|
384 | allfilters = [] | |
385 | while possiblefilters: |
|
385 | while possiblefilters: | |
386 | for name in possiblefilters: |
|
386 | for name in possiblefilters: | |
387 | subset = repoview.subsettable.get(name) |
|
387 | subset = repoview.subsettable.get(name) | |
388 | if subset not in possiblefilters: |
|
388 | if subset not in possiblefilters: | |
389 | break |
|
389 | break | |
390 | else: |
|
390 | else: | |
391 | assert False, 'subset cycle %s!' % possiblefilters |
|
391 | assert False, 'subset cycle %s!' % possiblefilters | |
392 | allfilters.append(name) |
|
392 | allfilters.append(name) | |
393 | possiblefilters.remove(name) |
|
393 | possiblefilters.remove(name) | |
394 |
|
394 | |||
395 | # warm the cache |
|
395 | # warm the cache | |
396 | if not full: |
|
396 | if not full: | |
397 | for name in allfilters: |
|
397 | for name in allfilters: | |
398 | repo.filtered(name).branchmap() |
|
398 | repo.filtered(name).branchmap() | |
399 | # add unfiltered |
|
399 | # add unfiltered | |
400 | allfilters.append(None) |
|
400 | allfilters.append(None) | |
401 | oldread = branchmap.read |
|
401 | oldread = branchmap.read | |
402 | oldwrite = branchmap.branchcache.write |
|
402 | oldwrite = branchmap.branchcache.write | |
403 | try: |
|
403 | try: | |
404 | branchmap.read = lambda repo: None |
|
404 | branchmap.read = lambda repo: None | |
405 | branchmap.write = lambda repo: None |
|
405 | branchmap.write = lambda repo: None | |
406 | for name in allfilters: |
|
406 | for name in allfilters: | |
407 | timer(getbranchmap(name), title=str(name)) |
|
407 | timer(getbranchmap(name), title=str(name)) | |
408 | finally: |
|
408 | finally: | |
409 | branchmap.read = oldread |
|
409 | branchmap.read = oldread | |
410 | branchmap.branchcache.write = oldwrite |
|
410 | branchmap.branchcache.write = oldwrite |
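
All of the perf* commands above funnel their measurements through the timer() helper defined at the top of the file: it re-runs the candidate function until roughly three seconds and at least 100 calls have elapsed (or ten seconds and at least 3 calls for slow operations), then prints the best wall-clock, combined, user and system times to stderr. Below is a minimal standalone sketch of that best-of-N loop for experimenting outside Mercurial; the bench() name and the sample workload are illustrative, not part of the extension.

import os, sys, time

def bench(func, title=None):
    # Re-run func() until ~3s have passed and it ran at least 100 times,
    # or ~10s have passed and it ran at least 3 times (slow operations).
    results = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        results.append((cstop - cstart,
                        ostop[0] - ostart[0],   # user CPU seconds
                        ostop[1] - ostart[1]))  # system CPU seconds
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    best = min(results)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (best[0], best[1] + best[2], best[1], best[2], count))

bench(lambda: sorted(range(100000)), title='sorted')  # illustrative workload
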
@@ -1,67 +1,67 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 |
|
2 | |||
3 | from mercurial import demandimport |
|
3 | from mercurial import demandimport | |
4 | demandimport.enable() |
|
4 | demandimport.enable() | |
5 |
|
5 | |||
6 | import
|
|
6 | import sys | |
7 | from mercurial.i18n import _ |
|
7 | from mercurial.i18n import _ | |
8 | from mercurial import simplemerge, fancyopts, util, ui |
|
8 | from mercurial import simplemerge, fancyopts, util, ui | |
9 |
|
9 | |||
10 | options = [('L', 'label', [], _('labels to use on conflict markers')), |
|
10 | options = [('L', 'label', [], _('labels to use on conflict markers')), | |
11 | ('a', 'text', None, _('treat all files as text')), |
|
11 | ('a', 'text', None, _('treat all files as text')), | |
12 | ('p', 'print', None, |
|
12 | ('p', 'print', None, | |
13 | _('print results instead of overwriting LOCAL')), |
|
13 | _('print results instead of overwriting LOCAL')), | |
14 | ('', 'no-minimal', None, |
|
14 | ('', 'no-minimal', None, | |
15 | _('do not try to minimize conflict regions')), |
|
15 | _('do not try to minimize conflict regions')), | |
16 | ('h', 'help', None, _('display help and exit')), |
|
16 | ('h', 'help', None, _('display help and exit')), | |
17 | ('q', 'quiet', None, _('suppress output'))] |
|
17 | ('q', 'quiet', None, _('suppress output'))] | |
18 |
|
18 | |||
19 | usage = _('''simplemerge [OPTS] LOCAL BASE OTHER |
|
19 | usage = _('''simplemerge [OPTS] LOCAL BASE OTHER | |
20 |
|
20 | |||
21 | Simple three-way file merge utility with a minimal feature set. |
|
21 | Simple three-way file merge utility with a minimal feature set. | |
22 |
|
22 | |||
23 | Apply to LOCAL the changes necessary to go from BASE to OTHER. |
|
23 | Apply to LOCAL the changes necessary to go from BASE to OTHER. | |
24 |
|
24 | |||
25 | By default, LOCAL is overwritten with the results of this operation. |
|
25 | By default, LOCAL is overwritten with the results of this operation. | |
26 | ''') |
|
26 | ''') | |
27 |
|
27 | |||
28 | class ParseError(Exception): |
|
28 | class ParseError(Exception): | |
29 | """Exception raised on errors in parsing the command line.""" |
|
29 | """Exception raised on errors in parsing the command line.""" | |
30 |
|
30 | |||
31 | def showhelp(): |
|
31 | def showhelp(): | |
32 | sys.stdout.write(usage) |
|
32 | sys.stdout.write(usage) | |
33 | sys.stdout.write('\noptions:\n') |
|
33 | sys.stdout.write('\noptions:\n') | |
34 |
|
34 | |||
35 | out_opts = [] |
|
35 | out_opts = [] | |
36 | for shortopt, longopt, default, desc in options: |
|
36 | for shortopt, longopt, default, desc in options: | |
37 | out_opts.append(('%2s%s' % (shortopt and '-%s' % shortopt, |
|
37 | out_opts.append(('%2s%s' % (shortopt and '-%s' % shortopt, | |
38 | longopt and ' --%s' % longopt), |
|
38 | longopt and ' --%s' % longopt), | |
39 | '%s' % desc)) |
|
39 | '%s' % desc)) | |
40 | opts_len = max([len(opt[0]) for opt in out_opts]) |
|
40 | opts_len = max([len(opt[0]) for opt in out_opts]) | |
41 | for first, second in out_opts: |
|
41 | for first, second in out_opts: | |
42 | sys.stdout.write(' %-*s %s\n' % (opts_len, first, second)) |
|
42 | sys.stdout.write(' %-*s %s\n' % (opts_len, first, second)) | |
43 |
|
43 | |||
44 | try: |
|
44 | try: | |
45 | for fp in (sys.stdin, sys.stdout, sys.stderr): |
|
45 | for fp in (sys.stdin, sys.stdout, sys.stderr): | |
46 | util.setbinary(fp) |
|
46 | util.setbinary(fp) | |
47 |
|
47 | |||
48 | opts = {} |
|
48 | opts = {} | |
49 | try: |
|
49 | try: | |
50 | args = fancyopts.fancyopts(sys.argv[1:], options, opts) |
|
50 | args = fancyopts.fancyopts(sys.argv[1:], options, opts) | |
51 | except fancyopts.getopt.GetoptError, e: |
|
51 | except fancyopts.getopt.GetoptError, e: | |
52 | raise ParseError(e) |
|
52 | raise ParseError(e) | |
53 | if opts['help']: |
|
53 | if opts['help']: | |
54 | showhelp() |
|
54 | showhelp() | |
55 | sys.exit(0) |
|
55 | sys.exit(0) | |
56 | if len(args) != 3: |
|
56 | if len(args) != 3: | |
57 | raise ParseError(_('wrong number of arguments')) |
|
57 | raise ParseError(_('wrong number of arguments')) | |
58 | sys.exit(simplemerge.simplemerge(ui.ui(), *args, **opts)) |
|
58 | sys.exit(simplemerge.simplemerge(ui.ui(), *args, **opts)) | |
59 | except ParseError, e: |
|
59 | except ParseError, e: | |
60 | sys.stdout.write("%s: %s\n" % (sys.argv[0], e)) |
|
60 | sys.stdout.write("%s: %s\n" % (sys.argv[0], e)) | |
61 | showhelp() |
|
61 | showhelp() | |
62 | sys.exit(1) |
|
62 | sys.exit(1) | |
63 | except util.Abort, e: |
|
63 | except util.Abort, e: | |
64 | sys.stderr.write("abort: %s\n" % e) |
|
64 | sys.stderr.write("abort: %s\n" % e) | |
65 | sys.exit(255) |
|
65 | sys.exit(255) | |
66 | except KeyboardInterrupt: |
|
66 | except KeyboardInterrupt: | |
67 | sys.exit(255) |
|
67 | sys.exit(255) |
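
The wrapper above only parses the command line with fancyopts and hands LOCAL, BASE and OTHER straight to simplemerge.simplemerge(), using its return value as the process exit status. A rough sketch of driving the same entry point from Python instead of the shell follows; the file names are illustrative, and the keyword options are assumed to be the same ones listed in the options table above.

from mercurial import simplemerge, ui

# Apply to local.txt the changes that take base.txt to other.txt.
# With 'print' set the merged text goes to stdout instead of
# overwriting local.txt; 'label' names the conflict-marker sides.
# The return value is what the script above uses as its exit status.
opts = {'print': True, 'quiet': True, 'label': ['local', 'other']}
ret = simplemerge.simplemerge(ui.ui(), 'local.txt', 'base.txt', 'other.txt',
                              **opts)
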
@@ -1,1110 +1,1109 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $ |
|
2 | # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $ | |
3 | # Author: Engelbert Gruber <grubert@users.sourceforge.net> |
|
3 | # Author: Engelbert Gruber <grubert@users.sourceforge.net> | |
4 | # Copyright: This module is put into the public domain. |
|
4 | # Copyright: This module is put into the public domain. | |
5 |
|
5 | |||
6 | """ |
|
6 | """ | |
7 | Simple man page writer for reStructuredText. |
|
7 | Simple man page writer for reStructuredText. | |
8 |
|
8 | |||
9 | Man pages (short for "manual pages") contain system documentation on unix-like |
|
9 | Man pages (short for "manual pages") contain system documentation on unix-like | |
10 | systems. The pages are grouped in numbered sections: |
|
10 | systems. The pages are grouped in numbered sections: | |
11 |
|
11 | |||
12 | 1 executable programs and shell commands |
|
12 | 1 executable programs and shell commands | |
13 | 2 system calls |
|
13 | 2 system calls | |
14 | 3 library functions |
|
14 | 3 library functions | |
15 | 4 special files |
|
15 | 4 special files | |
16 | 5 file formats |
|
16 | 5 file formats | |
17 | 6 games |
|
17 | 6 games | |
18 | 7 miscellaneous |
|
18 | 7 miscellaneous | |
19 | 8 system administration |
|
19 | 8 system administration | |
20 |
|
20 | |||
21 | Man pages are written in *troff*, a text file formatting system. |
|
21 | Man pages are written in *troff*, a text file formatting system. | |
22 |
|
22 | |||
23 | See http://www.tldp.org/HOWTO/Man-Page for a start. |
|
23 | See http://www.tldp.org/HOWTO/Man-Page for a start. | |
24 |
|
24 | |||
25 | Man pages have no subsections, only parts. |
|
25 | Man pages have no subsections, only parts. | |
26 | Standard parts |
|
26 | Standard parts | |
27 |
|
27 | |||
28 | NAME , |
|
28 | NAME , | |
29 | SYNOPSIS , |
|
29 | SYNOPSIS , | |
30 | DESCRIPTION , |
|
30 | DESCRIPTION , | |
31 | OPTIONS , |
|
31 | OPTIONS , | |
32 | FILES , |
|
32 | FILES , | |
33 | SEE ALSO , |
|
33 | SEE ALSO , | |
34 | BUGS , |
|
34 | BUGS , | |
35 |
|
35 | |||
36 | and |
|
36 | and | |
37 |
|
37 | |||
38 | AUTHOR . |
|
38 | AUTHOR . | |
39 |
|
39 | |||
40 | A unix-like system keeps an index of the DESCRIPTIONs, which is accessible |
|
40 | A unix-like system keeps an index of the DESCRIPTIONs, which is accessible | |
41 | by the command whatis or apropos. |
|
41 | by the command whatis or apropos. | |
42 |
|
42 | |||
43 | """ |
|
43 | """ | |
44 |
|
44 | |||
45 | __docformat__ = 'reStructuredText' |
|
45 | __docformat__ = 'reStructuredText' | |
46 |
|
46 | |||
47 | import re |
|
47 | import re | |
48 |
|
48 | |||
49 | from docutils import nodes, writers, languages |
|
49 | from docutils import nodes, writers, languages | |
50 | try: |
|
50 | try: | |
51 | import roman |
|
51 | import roman | |
52 | except ImportError: |
|
52 | except ImportError: | |
53 | from docutils.utils import roman |
|
53 | from docutils.utils import roman | |
54 | import inspect |
|
54 | import inspect | |
55 |
|
55 | |||
56 | FIELD_LIST_INDENT = 7 |
|
56 | FIELD_LIST_INDENT = 7 | |
57 | DEFINITION_LIST_INDENT = 7 |
|
57 | DEFINITION_LIST_INDENT = 7 | |
58 | OPTION_LIST_INDENT = 7 |
|
58 | OPTION_LIST_INDENT = 7 | |
59 | BLOCKQOUTE_INDENT = 3.5 |
|
59 | BLOCKQOUTE_INDENT = 3.5 | |
60 |
|
60 | |||
61 | # Define two macros so man/roff can calculate the |
|
61 | # Define two macros so man/roff can calculate the | |
62 | # indent/unindent margins by itself |
|
62 | # indent/unindent margins by itself | |
63 | MACRO_DEF = (r""". |
|
63 | MACRO_DEF = (r""". | |
64 | .nr rst2man-indent-level 0 |
|
64 | .nr rst2man-indent-level 0 | |
65 | . |
|
65 | . | |
66 | .de1 rstReportMargin |
|
66 | .de1 rstReportMargin | |
67 | \\$1 \\n[an-margin] |
|
67 | \\$1 \\n[an-margin] | |
68 | level \\n[rst2man-indent-level] |
|
68 | level \\n[rst2man-indent-level] | |
69 | level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
69 | level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] | |
70 | - |
|
70 | - | |
71 | \\n[rst2man-indent0] |
|
71 | \\n[rst2man-indent0] | |
72 | \\n[rst2man-indent1] |
|
72 | \\n[rst2man-indent1] | |
73 | \\n[rst2man-indent2] |
|
73 | \\n[rst2man-indent2] | |
74 | .. |
|
74 | .. | |
75 | .de1 INDENT |
|
75 | .de1 INDENT | |
76 | .\" .rstReportMargin pre: |
|
76 | .\" .rstReportMargin pre: | |
77 | . RS \\$1 |
|
77 | . RS \\$1 | |
78 | . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] |
|
78 | . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] | |
79 | . nr rst2man-indent-level +1 |
|
79 | . nr rst2man-indent-level +1 | |
80 | .\" .rstReportMargin post: |
|
80 | .\" .rstReportMargin post: | |
81 | .. |
|
81 | .. | |
82 | .de UNINDENT |
|
82 | .de UNINDENT | |
83 | . RE |
|
83 | . RE | |
84 | .\" indent \\n[an-margin] |
|
84 | .\" indent \\n[an-margin] | |
85 | .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
85 | .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] | |
86 | .nr rst2man-indent-level -1 |
|
86 | .nr rst2man-indent-level -1 | |
87 | .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
87 | .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] | |
88 | .in \\n[rst2man-indent\\n[rst2man-indent-level]]u |
|
88 | .in \\n[rst2man-indent\\n[rst2man-indent-level]]u | |
89 | .. |
|
89 | .. | |
90 | """) |
|
90 | """) | |
91 |
|
91 | |||
92 | class Writer(writers.Writer): |
|
92 | class Writer(writers.Writer): | |
93 |
|
93 | |||
94 | supported = ('manpage') |
|
94 | supported = ('manpage') | |
95 | """Formats this writer supports.""" |
|
95 | """Formats this writer supports.""" | |
96 |
|
96 | |||
97 | output = None |
|
97 | output = None | |
98 | """Final translated form of `document`.""" |
|
98 | """Final translated form of `document`.""" | |
99 |
|
99 | |||
100 | def __init__(self): |
|
100 | def __init__(self): | |
101 | writers.Writer.__init__(self) |
|
101 | writers.Writer.__init__(self) | |
102 | self.translator_class = Translator |
|
102 | self.translator_class = Translator | |
103 |
|
103 | |||
104 | def translate(self): |
|
104 | def translate(self): | |
105 | visitor = self.translator_class(self.document) |
|
105 | visitor = self.translator_class(self.document) | |
106 | self.document.walkabout(visitor) |
|
106 | self.document.walkabout(visitor) | |
107 | self.output = visitor.astext() |
|
107 | self.output = visitor.astext() | |
108 |
|
108 | |||
109 |
|
109 | |||
110 | class Table(object): |
|
110 | class Table(object): | |
111 | def __init__(self): |
|
111 | def __init__(self): | |
112 | self._rows = [] |
|
112 | self._rows = [] | |
113 | self._options = ['center'] |
|
113 | self._options = ['center'] | |
114 | self._tab_char = '\t' |
|
114 | self._tab_char = '\t' | |
115 | self._coldefs = [] |
|
115 | self._coldefs = [] | |
116 | def new_row(self): |
|
116 | def new_row(self): | |
117 | self._rows.append([]) |
|
117 | self._rows.append([]) | |
118 | def append_separator(self, separator): |
|
118 | def append_separator(self, separator): | |
119 | """Append the separator for table head.""" |
|
119 | """Append the separator for table head.""" | |
120 | self._rows.append([separator]) |
|
120 | self._rows.append([separator]) | |
121 | def append_cell(self, cell_lines): |
|
121 | def append_cell(self, cell_lines): | |
122 | """cell_lines is an array of lines""" |
|
122 | """cell_lines is an array of lines""" | |
123 | start = 0 |
|
123 | start = 0 | |
124 | if len(cell_lines) > 0 and cell_lines[0] == '.sp\n': |
|
124 | if len(cell_lines) > 0 and cell_lines[0] == '.sp\n': | |
125 | start = 1 |
|
125 | start = 1 | |
126 | self._rows[-1].append(cell_lines[start:]) |
|
126 | self._rows[-1].append(cell_lines[start:]) | |
127 | if len(self._coldefs) < len(self._rows[-1]): |
|
127 | if len(self._coldefs) < len(self._rows[-1]): | |
128 | self._coldefs.append('l') |
|
128 | self._coldefs.append('l') | |
129 | def _minimize_cell(self, cell_lines): |
|
129 | def _minimize_cell(self, cell_lines): | |
130 | """Remove leading and trailing blank and ``.sp`` lines""" |
|
130 | """Remove leading and trailing blank and ``.sp`` lines""" | |
131 | while (cell_lines and cell_lines[0] in ('\n', '.sp\n')): |
|
131 | while (cell_lines and cell_lines[0] in ('\n', '.sp\n')): | |
132 | del cell_lines[0] |
|
132 | del cell_lines[0] | |
133 | while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')): |
|
133 | while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')): | |
134 | del cell_lines[-1] |
|
134 | del cell_lines[-1] | |
135 | def as_list(self): |
|
135 | def as_list(self): | |
136 | text = ['.TS\n'] |
|
136 | text = ['.TS\n'] | |
137 | text.append(' '.join(self._options) + ';\n') |
|
137 | text.append(' '.join(self._options) + ';\n') | |
138 | text.append('|%s|.\n' % ('|'.join(self._coldefs))) |
|
138 | text.append('|%s|.\n' % ('|'.join(self._coldefs))) | |
139 | for row in self._rows: |
|
139 | for row in self._rows: | |
140 | # row = array of cells. cell = array of lines. |
|
140 | # row = array of cells. cell = array of lines. | |
141 | text.append('_\n') # line above |
|
141 | text.append('_\n') # line above | |
142 | text.append('T{\n') |
|
142 | text.append('T{\n') | |
143 | for i in range(len(row)): |
|
143 | for i in range(len(row)): | |
144 | cell = row[i] |
|
144 | cell = row[i] | |
145 | self._minimize_cell(cell) |
|
145 | self._minimize_cell(cell) | |
146 | text.extend(cell) |
|
146 | text.extend(cell) | |
147 | if not text[-1].endswith('\n'): |
|
147 | if not text[-1].endswith('\n'): | |
148 | text[-1] += '\n' |
|
148 | text[-1] += '\n' | |
149 | if i < len(row) - 1: |
|
149 | if i < len(row) - 1: | |
150 | text.append('T}'+self._tab_char+'T{\n') |
|
150 | text.append('T}'+self._tab_char+'T{\n') | |
151 | else: |
|
151 | else: | |
152 | text.append('T}\n') |
|
152 | text.append('T}\n') | |
153 | text.append('_\n') |
|
153 | text.append('_\n') | |
154 | text.append('.TE\n') |
|
154 | text.append('.TE\n') | |
155 | return text |
|
155 | return text | |
156 |
|
156 | |||
157 | class Translator(nodes.NodeVisitor): |
|
157 | class Translator(nodes.NodeVisitor): | |
158 | """""" |
|
158 | """""" | |
159 |
|
159 | |||
160 | words_and_spaces = re.compile(r'\S+| +|\n') |
|
160 | words_and_spaces = re.compile(r'\S+| +|\n') | |
161 | document_start = """Man page generated from reStructuredText.""" |
|
161 | document_start = """Man page generated from reStructuredText.""" | |
162 |
|
162 | |||
163 | def __init__(self, document): |
|
163 | def __init__(self, document): | |
164 | nodes.NodeVisitor.__init__(self, document) |
|
164 | nodes.NodeVisitor.__init__(self, document) | |
165 | self.settings = settings = document.settings |
|
165 | self.settings = settings = document.settings | |
166 | lcode = settings.language_code |
|
166 | lcode = settings.language_code | |
167 | arglen = len(inspect.getargspec(languages.get_language)[0]) |
|
167 | arglen = len(inspect.getargspec(languages.get_language)[0]) | |
168 | if arglen == 2: |
|
168 | if arglen == 2: | |
169 | self.language = languages.get_language(lcode, |
|
169 | self.language = languages.get_language(lcode, | |
170 | self.document.reporter) |
|
170 | self.document.reporter) | |
171 | else: |
|
171 | else: | |
172 | self.language = languages.get_language(lcode) |
|
172 | self.language = languages.get_language(lcode) | |
173 | self.head = [] |
|
173 | self.head = [] | |
174 | self.body = [] |
|
174 | self.body = [] | |
175 | self.foot = [] |
|
175 | self.foot = [] | |
176 | self.section_level = 0 |
|
176 | self.section_level = 0 | |
177 | self.context = [] |
|
177 | self.context = [] | |
178 | self.topic_class = '' |
|
178 | self.topic_class = '' | |
179 | self.colspecs = [] |
|
179 | self.colspecs = [] | |
180 | self.compact_p = 1 |
|
180 | self.compact_p = 1 | |
181 | self.compact_simple = None |
|
181 | self.compact_simple = None | |
182 | # the list style "*" bullet or "#" numbered |
|
182 | # the list style "*" bullet or "#" numbered | |
183 | self._list_char = [] |
|
183 | self._list_char = [] | |
184 | # writing the header .TH and .SH NAME is postponed after |
|
184 | # writing the header .TH and .SH NAME is postponed after | |
185 | # docinfo. |
|
185 | # docinfo. | |
186 | self._docinfo = { |
|
186 | self._docinfo = { | |
187 | "title" : "", "title_upper": "", |
|
187 | "title" : "", "title_upper": "", | |
188 | "subtitle" : "", |
|
188 | "subtitle" : "", | |
189 | "manual_section" : "", "manual_group" : "", |
|
189 | "manual_section" : "", "manual_group" : "", | |
190 | "author" : [], |
|
190 | "author" : [], | |
191 | "date" : "", |
|
191 | "date" : "", | |
192 | "copyright" : "", |
|
192 | "copyright" : "", | |
193 | "version" : "", |
|
193 | "version" : "", | |
194 | } |
|
194 | } | |
195 | self._docinfo_keys = [] # a list to keep the sequence as in source. |
|
195 | self._docinfo_keys = [] # a list to keep the sequence as in source. | |
196 | self._docinfo_names = {} # to get name from text not normalized. |
|
196 | self._docinfo_names = {} # to get name from text not normalized. | |
197 | self._in_docinfo = None |
|
197 | self._in_docinfo = None | |
198 | self._active_table = None |
|
198 | self._active_table = None | |
199 | self._in_literal = False |
|
199 | self._in_literal = False | |
200 | self.header_written = 0 |
|
200 | self.header_written = 0 | |
201 | self._line_block = 0 |
|
201 | self._line_block = 0 | |
202 | self.authors = [] |
|
202 | self.authors = [] | |
203 | self.section_level = 0 |
|
203 | self.section_level = 0 | |
204 | self._indent = [0] |
|
204 | self._indent = [0] | |
205 | # central definition of simple processing rules |
|
205 | # central definition of simple processing rules | |
206 | # what to output on : visit, depart |
|
206 | # what to output on : visit, depart | |
207 | # Do not use paragraph requests ``.PP`` because these set indentation. |
|
207 | # Do not use paragraph requests ``.PP`` because these set indentation. | |
208 | # use ``.sp``. Remove superfluous ``.sp`` in ``astext``. |
|
208 | # use ``.sp``. Remove superfluous ``.sp`` in ``astext``. | |
209 | # |
|
209 | # | |
210 | # Fonts are put on a stack, the top one is used. |
|
210 | # Fonts are put on a stack, the top one is used. | |
211 | # ``.ft P`` or ``\\fP`` pop from stack. |
|
211 | # ``.ft P`` or ``\\fP`` pop from stack. | |
212 | # ``B`` bold, ``I`` italic, ``R`` roman should be available. |
|
212 | # ``B`` bold, ``I`` italic, ``R`` roman should be available. | |
213 | # Hopefully ``C`` courier too. |
|
213 | # Hopefully ``C`` courier too. | |
214 | self.defs = { |
|
214 | self.defs = { | |
215 | 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'), |
|
215 | 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'), | |
216 | 'definition_list_item' : ('.TP', ''), |
|
216 | 'definition_list_item' : ('.TP', ''), | |
217 | 'field_name' : ('.TP\n.B ', '\n'), |
|
217 | 'field_name' : ('.TP\n.B ', '\n'), | |
218 | 'literal' : ('\\fB', '\\fP'), |
|
218 | 'literal' : ('\\fB', '\\fP'), | |
219 | 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'), |
|
219 | 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'), | |
220 |
|
220 | |||
221 | 'option_list_item' : ('.TP\n', ''), |
|
221 | 'option_list_item' : ('.TP\n', ''), | |
222 |
|
222 | |||
223 | 'reference' : (r'\%', r'\:'), |
|
223 | 'reference' : (r'\%', r'\:'), | |
224 | 'emphasis': ('\\fI', '\\fP'), |
|
224 | 'emphasis': ('\\fI', '\\fP'), | |
225 | 'strong' : ('\\fB', '\\fP'), |
|
225 | 'strong' : ('\\fB', '\\fP'), | |
226 | 'term' : ('\n.B ', '\n'), |
|
226 | 'term' : ('\n.B ', '\n'), | |
227 | 'title_reference' : ('\\fI', '\\fP'), |
|
227 | 'title_reference' : ('\\fI', '\\fP'), | |
228 |
|
228 | |||
229 | 'topic-title' : ('.SS ',), |
|
229 | 'topic-title' : ('.SS ',), | |
230 | 'sidebar-title' : ('.SS ',), |
|
230 | 'sidebar-title' : ('.SS ',), | |
231 |
|
231 | |||
232 | 'problematic' : ('\n.nf\n', '\n.fi\n'), |
|
232 | 'problematic' : ('\n.nf\n', '\n.fi\n'), | |
233 | } |
|
233 | } | |
234 | # NOTE don't specify the newline before a dot-command, but ensure |
|
234 | # NOTE don't specify the newline before a dot-command, but ensure | |
235 | # it is there. |
|
235 | # it is there. | |
236 |
|
236 | |||
237 | def comment_begin(self, text): |
|
237 | def comment_begin(self, text): | |
238 | """Return commented version of the passed text WITHOUT end of |
|
238 | """Return commented version of the passed text WITHOUT end of | |
239 | line/comment.""" |
|
239 | line/comment.""" | |
240 | prefix = '.\\" ' |
|
240 | prefix = '.\\" ' | |
241 | out_text = ''.join( |
|
241 | out_text = ''.join( | |
242 | [(prefix + in_line + '\n') |
|
242 | [(prefix + in_line + '\n') | |
243 | for in_line in text.split('\n')]) |
|
243 | for in_line in text.split('\n')]) | |
244 | return out_text |
|
244 | return out_text | |
245 |
|
245 | |||
246 | def comment(self, text): |
|
246 | def comment(self, text): | |
247 | """Return commented version of the passed text.""" |
|
247 | """Return commented version of the passed text.""" | |
248 | return self.comment_begin(text)+'.\n' |
|
248 | return self.comment_begin(text)+'.\n' | |
249 |
|
249 | |||
250 | def ensure_eol(self): |
|
250 | def ensure_eol(self): | |
251 | """Ensure the last line in body is terminated by new line.""" |
|
251 | """Ensure the last line in body is terminated by new line.""" | |
252 | if self.body[-1][-1] != '\n': |
|
252 | if self.body[-1][-1] != '\n': | |
253 | self.body.append('\n') |
|
253 | self.body.append('\n') | |
254 |
|
254 | |||
255 | def astext(self): |
|
255 | def astext(self): | |
256 | """Return the final formatted document as a string.""" |
|
256 | """Return the final formatted document as a string.""" | |
257 | if not self.header_written: |
|
257 | if not self.header_written: | |
258 | # ensure we get a ".TH" as viewers require it. |
|
258 | # ensure we get a ".TH" as viewers require it. | |
259 | self.head.append(self.header()) |
|
259 | self.head.append(self.header()) | |
260 | # filter body |
|
260 | # filter body | |
261 | for i in xrange(len(self.body) - 1, 0, -1): |
|
261 | for i in xrange(len(self.body) - 1, 0, -1): | |
262 | # remove superfluous vertical gaps. |
|
262 | # remove superfluous vertical gaps. | |
263 | if self.body[i] == '.sp\n': |
|
263 | if self.body[i] == '.sp\n': | |
264 | if self.body[i - 1][:4] in ('.BI ','.IP '): |
|
264 | if self.body[i - 1][:4] in ('.BI ','.IP '): | |
265 | self.body[i] = '.\n' |
|
265 | self.body[i] = '.\n' | |
266 | elif (self.body[i - 1][:3] == '.B ' and |
|
266 | elif (self.body[i - 1][:3] == '.B ' and | |
267 | self.body[i - 2][:4] == '.TP\n'): |
|
267 | self.body[i - 2][:4] == '.TP\n'): | |
268 | self.body[i] = '.\n' |
|
268 | self.body[i] = '.\n' | |
269 | elif (self.body[i - 1] == '\n' and |
|
269 | elif (self.body[i - 1] == '\n' and | |
270 | self.body[i - 2][0] != '.' and |
|
270 | self.body[i - 2][0] != '.' and | |
271 | (self.body[i - 3][:7] == '.TP\n.B ' |
|
271 | (self.body[i - 3][:7] == '.TP\n.B ' | |
272 | or self.body[i - 3][:4] == '\n.B ') |
|
272 | or self.body[i - 3][:4] == '\n.B ') | |
273 | ): |
|
273 | ): | |
274 | self.body[i] = '.\n' |
|
274 | self.body[i] = '.\n' | |
275 | return ''.join(self.head + self.body + self.foot) |
|
275 | return ''.join(self.head + self.body + self.foot) | |
276 |
|
276 | |||
277 | def deunicode(self, text): |
|
277 | def deunicode(self, text): | |
278 | text = text.replace(u'\xa0', '\\ ') |
|
278 | text = text.replace(u'\xa0', '\\ ') | |
279 | text = text.replace(u'\u2020', '\\(dg') |
|
279 | text = text.replace(u'\u2020', '\\(dg') | |
280 | return text |
|
280 | return text | |
281 |
|
281 | |||
282 | def visit_Text(self, node): |
|
282 | def visit_Text(self, node): | |
283 | text = node.astext() |
|
283 | text = node.astext() | |
284 | text = text.replace('\\','\\e') |
|
284 | text = text.replace('\\','\\e') | |
285 | replace_pairs = [ |
|
285 | replace_pairs = [ | |
286 | (u'-', ur'\-'), |
|
286 | (u'-', ur'\-'), | |
287 | (u'\'', ur'\(aq'), |
|
287 | (u'\'', ur'\(aq'), | |
288 | (u'´', ur'\''), |
|
288 | (u'´', ur'\''), | |
289 | (u'`', ur'\(ga'), |
|
289 | (u'`', ur'\(ga'), | |
290 | ] |
|
290 | ] | |
291 | for (in_char, out_markup) in replace_pairs: |
|
291 | for (in_char, out_markup) in replace_pairs: | |
292 | text = text.replace(in_char, out_markup) |
|
292 | text = text.replace(in_char, out_markup) | |
293 | # unicode |
|
293 | # unicode | |
294 | text = self.deunicode(text) |
|
294 | text = self.deunicode(text) | |
295 | if self._in_literal: |
|
295 | if self._in_literal: | |
296 | # prevent interpretation of "." at line start |
|
296 | # prevent interpretation of "." at line start | |
297 | if text[0] == '.': |
|
297 | if text[0] == '.': | |
298 | text = '\\&' + text |
|
298 | text = '\\&' + text | |
299 | text = text.replace('\n.', '\n\\&.') |
|
299 | text = text.replace('\n.', '\n\\&.') | |
300 | self.body.append(text) |
|
300 | self.body.append(text) | |
301 |
|
301 | |||
302 | def depart_Text(self, node): |
|
302 | def depart_Text(self, node): | |
303 | pass |
|
303 | pass | |
304 |
|
304 | |||
305 | def list_start(self, node): |
|
305 | def list_start(self, node): | |
306 | class enum_char(object): |
|
306 | class enum_char(object): | |
307 | enum_style = { |
|
307 | enum_style = { | |
308 | 'bullet' : '\\(bu', |
|
308 | 'bullet' : '\\(bu', | |
309 | 'emdash' : '\\(em', |
|
309 | 'emdash' : '\\(em', | |
310 | } |
|
310 | } | |
311 |
|
311 | |||
312 | def __init__(self, style): |
|
312 | def __init__(self, style): | |
313 | self._style = style |
|
313 | self._style = style | |
314 | if 'start' in node: |
|
314 | if 'start' in node: | |
315 | self._cnt = node['start'] - 1 |
|
315 | self._cnt = node['start'] - 1 | |
316 | else: |
|
316 | else: | |
317 | self._cnt = 0 |
|
317 | self._cnt = 0 | |
318 | self._indent = 2 |
|
318 | self._indent = 2 | |
319 | if style == 'arabic': |
|
319 | if style == 'arabic': | |
320 | # indentation depends on number of children |
|
320 | # indentation depends on number of children | |
321 | # and start value. |
|
321 | # and start value. | |
322 | self._indent = len(str(len(node.children))) |
|
322 | self._indent = len(str(len(node.children))) | |
323 | self._indent += len(str(self._cnt)) + 1 |
|
323 | self._indent += len(str(self._cnt)) + 1 | |
324 | elif style == 'loweralpha': |
|
324 | elif style == 'loweralpha': | |
325 | self._cnt += ord('a') - 1 |
|
325 | self._cnt += ord('a') - 1 | |
326 | self._indent = 3 |
|
326 | self._indent = 3 | |
327 | elif style == 'upperalpha': |
|
327 | elif style == 'upperalpha': | |
328 | self._cnt += ord('A') - 1 |
|
328 | self._cnt += ord('A') - 1 | |
329 | self._indent = 3 |
|
329 | self._indent = 3 | |
330 | elif style.endswith('roman'): |
|
330 | elif style.endswith('roman'): | |
331 | self._indent = 5 |
|
331 | self._indent = 5 | |
332 |
|
332 | |||
333 | def next(self): |
|
333 | def next(self): | |
334 | if self._style == 'bullet': |
|
334 | if self._style == 'bullet': | |
335 | return self.enum_style[self._style] |
|
335 | return self.enum_style[self._style] | |
336 | elif self._style == 'emdash': |
|
336 | elif self._style == 'emdash': | |
337 | return self.enum_style[self._style] |
|
337 | return self.enum_style[self._style] | |
338 | self._cnt += 1 |
|
338 | self._cnt += 1 | |
339 | # TODO add prefix postfix |
|
339 | # TODO add prefix postfix | |
340 | if self._style == 'arabic': |
|
340 | if self._style == 'arabic': | |
341 | return "%d." % self._cnt |
|
341 | return "%d." % self._cnt | |
342 | elif self._style in ('loweralpha', 'upperalpha'): |
|
342 | elif self._style in ('loweralpha', 'upperalpha'): | |
343 | return "%c." % self._cnt |
|
343 | return "%c." % self._cnt | |
344 | elif self._style.endswith('roman'): |
|
344 | elif self._style.endswith('roman'): | |
345 | res = roman.toRoman(self._cnt) + '.' |
|
345 | res = roman.toRoman(self._cnt) + '.' | |
346 | if self._style.startswith('upper'): |
|
346 | if self._style.startswith('upper'): | |
347 | return res.upper() |
|
347 | return res.upper() | |
348 | return res.lower() |
|
348 | return res.lower() | |
349 | else: |
|
349 | else: | |
350 | return "%d." % self._cnt |
|
350 | return "%d." % self._cnt | |
351 | def get_width(self): |
|
351 | def get_width(self): | |
352 | return self._indent |
|
352 | return self._indent | |
353 | def __repr__(self): |
|
353 | def __repr__(self): | |
354 | return 'enum_style-%s' % list(self._style) |
|
354 | return 'enum_style-%s' % list(self._style) | |
355 |
|
355 | |||
356 | if 'enumtype' in node: |
|
356 | if 'enumtype' in node: | |
357 | self._list_char.append(enum_char(node['enumtype'])) |
|
357 | self._list_char.append(enum_char(node['enumtype'])) | |
358 | else: |
|
358 | else: | |
359 | self._list_char.append(enum_char('bullet')) |
|
359 | self._list_char.append(enum_char('bullet')) | |
360 | if len(self._list_char) > 1: |
|
360 | if len(self._list_char) > 1: | |
361 | # indent nested lists |
|
361 | # indent nested lists | |
362 | self.indent(self._list_char[-2].get_width()) |
|
362 | self.indent(self._list_char[-2].get_width()) | |
363 | else: |
|
363 | else: | |
364 | self.indent(self._list_char[-1].get_width()) |
|
364 | self.indent(self._list_char[-1].get_width()) | |
365 |
|
365 | |||
366 | def list_end(self): |
|
366 | def list_end(self): | |
367 | self.dedent() |
|
367 | self.dedent() | |
368 | self._list_char.pop() |
|
368 | self._list_char.pop() | |
369 |
|
369 | |||
370 | def header(self): |
|
370 | def header(self): | |
371 | tmpl = (".TH %(title_upper)s %(manual_section)s" |
|
371 | tmpl = (".TH %(title_upper)s %(manual_section)s" | |
372 | " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" |
|
372 | " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" | |
373 | ".SH NAME\n" |
|
373 | ".SH NAME\n" | |
374 | "%(title)s \- %(subtitle)s\n") |
|
374 | "%(title)s \- %(subtitle)s\n") | |
375 | return tmpl % self._docinfo |
|
375 | return tmpl % self._docinfo | |
376 |
|
376 | |||
377 | def append_header(self): |
|
377 | def append_header(self): | |
378 | """append header with .TH and .SH NAME""" |
|
378 | """append header with .TH and .SH NAME""" | |
379 | # NOTE before everything |
|
379 | # NOTE before everything | |
380 | # .TH title_upper section date source manual |
|
380 | # .TH title_upper section date source manual | |
381 | if self.header_written: |
|
381 | if self.header_written: | |
382 | return |
|
382 | return | |
383 | self.body.append(self.header()) |
|
383 | self.body.append(self.header()) | |
384 | self.body.append(MACRO_DEF) |
|
384 | self.body.append(MACRO_DEF) | |
385 | self.header_written = 1 |
|
385 | self.header_written = 1 | |
386 |
|
386 | |||
387 | def visit_address(self, node): |
|
387 | def visit_address(self, node): | |
388 | self.visit_docinfo_item(node, 'address') |
|
388 | self.visit_docinfo_item(node, 'address') | |
389 |
|
389 | |||
390 | def depart_address(self, node): |
|
390 | def depart_address(self, node): | |
391 | pass |
|
391 | pass | |
392 |
|
392 | |||
393 | def visit_admonition(self, node, name=None): |
|
393 | def visit_admonition(self, node, name=None): | |
394 | if name: |
|
394 | if name: | |
395 | self.body.append('.IP %s\n' % |
|
395 | self.body.append('.IP %s\n' % | |
396 | self.language.labels.get(name, name)) |
|
396 | self.language.labels.get(name, name)) | |
397 |
|
397 | |||
398 | def depart_admonition(self, node): |
|
398 | def depart_admonition(self, node): | |
399 | self.body.append('.RE\n') |
|
399 | self.body.append('.RE\n') | |
400 |
|
400 | |||
401 | def visit_attention(self, node): |
|
401 | def visit_attention(self, node): | |
402 | self.visit_admonition(node, 'attention') |
|
402 | self.visit_admonition(node, 'attention') | |
403 |
|
403 | |||
404 | depart_attention = depart_admonition |
|
404 | depart_attention = depart_admonition | |
405 |
|
405 | |||
406 | def visit_docinfo_item(self, node, name): |
|
406 | def visit_docinfo_item(self, node, name): | |
407 | if name == 'author': |
|
407 | if name == 'author': | |
408 | self._docinfo[name].append(node.astext()) |
|
408 | self._docinfo[name].append(node.astext()) | |
409 | else: |
|
409 | else: | |
410 | self._docinfo[name] = node.astext() |
|
410 | self._docinfo[name] = node.astext() | |
411 | self._docinfo_keys.append(name) |
|
411 | self._docinfo_keys.append(name) | |
412 | raise nodes.SkipNode |
|
412 | raise nodes.SkipNode | |
413 |
|
413 | |||
414 | def depart_docinfo_item(self, node): |
|
414 | def depart_docinfo_item(self, node): | |
415 | pass |
|
415 | pass | |
416 |
|
416 | |||
417 | def visit_author(self, node): |
|
417 | def visit_author(self, node): | |
418 | self.visit_docinfo_item(node, 'author') |
|
418 | self.visit_docinfo_item(node, 'author') | |
419 |
|
419 | |||
420 | depart_author = depart_docinfo_item |
|
420 | depart_author = depart_docinfo_item | |
421 |
|
421 | |||
422 | def visit_authors(self, node): |
|
422 | def visit_authors(self, node): | |
423 | # _author is called anyway. |
|
423 | # _author is called anyway. | |
424 | pass |
|
424 | pass | |
425 |
|
425 | |||
426 | def depart_authors(self, node): |
|
426 | def depart_authors(self, node): | |
427 | pass |
|
427 | pass | |
428 |
|
428 | |||
429 | def visit_block_quote(self, node): |
|
429 | def visit_block_quote(self, node): | |
430 | # BUG/HACK: indent always uses the _last_ indentation, |
|
430 | # BUG/HACK: indent always uses the _last_ indentation, | |
431 | # thus we need two of them. |
|
431 | # thus we need two of them. | |
432 | self.indent(BLOCKQOUTE_INDENT) |
|
432 | self.indent(BLOCKQOUTE_INDENT) | |
433 | self.indent(0) |
|
433 | self.indent(0) | |
434 |
|
434 | |||
435 | def depart_block_quote(self, node): |
|
435 | def depart_block_quote(self, node): | |
436 | self.dedent() |
|
436 | self.dedent() | |
437 | self.dedent() |
|
437 | self.dedent() | |
438 |
|
438 | |||
439 | def visit_bullet_list(self, node): |
|
439 | def visit_bullet_list(self, node): | |
440 | self.list_start(node) |
|
440 | self.list_start(node) | |
441 |
|
441 | |||
442 | def depart_bullet_list(self, node): |
|
442 | def depart_bullet_list(self, node): | |
443 | self.list_end() |
|
443 | self.list_end() | |
444 |
|
444 | |||
445 | def visit_caption(self, node): |
|
445 | def visit_caption(self, node): | |
446 | pass |
|
446 | pass | |
447 |
|
447 | |||
448 | def depart_caption(self, node): |
|
448 | def depart_caption(self, node): | |
449 | pass |
|
449 | pass | |
450 |
|
450 | |||
451 | def visit_caution(self, node): |
|
451 | def visit_caution(self, node): | |
452 | self.visit_admonition(node, 'caution') |
|
452 | self.visit_admonition(node, 'caution') | |
453 |
|
453 | |||
454 | depart_caution = depart_admonition |
|
454 | depart_caution = depart_admonition | |
455 |
|
455 | |||
456 | def visit_citation(self, node): |
|
456 | def visit_citation(self, node): | |
457 | num, text = node.astext().split(None, 1) |
|
457 | num, text = node.astext().split(None, 1) | |
458 | num = num.strip() |
|
458 | num = num.strip() | |
459 | self.body.append('.IP [%s] 5\n' % num) |
|
459 | self.body.append('.IP [%s] 5\n' % num) | |
460 |
|
460 | |||
461 | def depart_citation(self, node): |
|
461 | def depart_citation(self, node): | |
462 | pass |
|
462 | pass | |
463 |
|
463 | |||
464 | def visit_citation_reference(self, node): |
|
464 | def visit_citation_reference(self, node): | |
465 | self.body.append('['+node.astext()+']') |
|
465 | self.body.append('['+node.astext()+']') | |
466 | raise nodes.SkipNode |
|
466 | raise nodes.SkipNode | |
467 |
|
467 | |||
468 | def visit_classifier(self, node): |
|
468 | def visit_classifier(self, node): | |
469 | pass |
|
469 | pass | |
470 |
|
470 | |||
471 | def depart_classifier(self, node): |
|
471 | def depart_classifier(self, node): | |
472 | pass |
|
472 | pass | |
473 |
|
473 | |||
474 | def visit_colspec(self, node): |
|
474 | def visit_colspec(self, node): | |
475 | self.colspecs.append(node) |
|
475 | self.colspecs.append(node) | |
476 |
|
476 | |||
477 | def depart_colspec(self, node): |
|
477 | def depart_colspec(self, node): | |
478 | pass |
|
478 | pass | |
479 |
|
479 | |||
480 | def write_colspecs(self): |
|
480 | def write_colspecs(self): | |
481 | self.body.append("%s.\n" % ('L '*len(self.colspecs))) |
|
481 | self.body.append("%s.\n" % ('L '*len(self.colspecs))) | |
482 |
|
482 | |||
483 | def visit_comment(self, node, |
|
483 | def visit_comment(self, node, | |
484 | sub=re.compile('-(?=-)').sub): |
|
484 | sub=re.compile('-(?=-)').sub): | |
485 | self.body.append(self.comment(node.astext())) |
|
485 | self.body.append(self.comment(node.astext())) | |
486 | raise nodes.SkipNode |
|
486 | raise nodes.SkipNode | |
487 |
|
487 | |||
488 | def visit_contact(self, node): |
|
488 | def visit_contact(self, node): | |
489 | self.visit_docinfo_item(node, 'contact') |
|
489 | self.visit_docinfo_item(node, 'contact') | |
490 |
|
490 | |||
491 | depart_contact = depart_docinfo_item |
|
491 | depart_contact = depart_docinfo_item | |
492 |
|
492 | |||
493 | def visit_container(self, node): |
|
493 | def visit_container(self, node): | |
494 | pass |
|
494 | pass | |
495 |
|
495 | |||
496 | def depart_container(self, node): |
|
496 | def depart_container(self, node): | |
497 | pass |
|
497 | pass | |
498 |
|
498 | |||
499 | def visit_compound(self, node): |
|
499 | def visit_compound(self, node): | |
500 | pass |
|
500 | pass | |
501 |
|
501 | |||
502 | def depart_compound(self, node): |
|
502 | def depart_compound(self, node): | |
503 | pass |
|
503 | pass | |
504 |
|
504 | |||
505 | def visit_copyright(self, node): |
|
505 | def visit_copyright(self, node): | |
506 | self.visit_docinfo_item(node, 'copyright') |
|
506 | self.visit_docinfo_item(node, 'copyright') | |
507 |
|
507 | |||
508 | def visit_danger(self, node): |
|
508 | def visit_danger(self, node): | |
509 | self.visit_admonition(node, 'danger') |
|
509 | self.visit_admonition(node, 'danger') | |
510 |
|
510 | |||
511 | depart_danger = depart_admonition |
|
511 | depart_danger = depart_admonition | |
512 |
|
512 | |||
513 | def visit_date(self, node): |
|
513 | def visit_date(self, node): | |
514 | self.visit_docinfo_item(node, 'date') |
|
514 | self.visit_docinfo_item(node, 'date') | |
515 |
|
515 | |||
516 | def visit_decoration(self, node): |
|
516 | def visit_decoration(self, node): | |
517 | pass |
|
517 | pass | |
518 |
|
518 | |||
519 | def depart_decoration(self, node): |
|
519 | def depart_decoration(self, node): | |
520 | pass |
|
520 | pass | |
521 |
|
521 | |||
522 | def visit_definition(self, node): |
|
522 | def visit_definition(self, node): | |
523 | pass |
|
523 | pass | |
524 |
|
524 | |||
525 | def depart_definition(self, node): |
|
525 | def depart_definition(self, node): | |
526 | pass |
|
526 | pass | |
527 |
|
527 | |||
528 | def visit_definition_list(self, node): |
|
528 | def visit_definition_list(self, node): | |
529 | self.indent(DEFINITION_LIST_INDENT) |
|
529 | self.indent(DEFINITION_LIST_INDENT) | |
530 |
|
530 | |||
531 | def depart_definition_list(self, node): |
|
531 | def depart_definition_list(self, node): | |
532 | self.dedent() |
|
532 | self.dedent() | |
533 |
|
533 | |||
534 | def visit_definition_list_item(self, node): |
|
534 | def visit_definition_list_item(self, node): | |
535 | self.body.append(self.defs['definition_list_item'][0]) |
|
535 | self.body.append(self.defs['definition_list_item'][0]) | |
536 |
|
536 | |||
537 | def depart_definition_list_item(self, node): |
|
537 | def depart_definition_list_item(self, node): | |
538 | self.body.append(self.defs['definition_list_item'][1]) |
|
538 | self.body.append(self.defs['definition_list_item'][1]) | |
539 |
|
539 | |||
540 | def visit_description(self, node): |
|
540 | def visit_description(self, node): | |
541 | pass |
|
541 | pass | |
542 |
|
542 | |||
543 | def depart_description(self, node): |
|
543 | def depart_description(self, node): | |
544 | pass |
|
544 | pass | |
545 |
|
545 | |||
546 | def visit_docinfo(self, node): |
|
546 | def visit_docinfo(self, node): | |
547 | self._in_docinfo = 1 |
|
547 | self._in_docinfo = 1 | |
548 |
|
548 | |||
549 | def depart_docinfo(self, node): |
|
549 | def depart_docinfo(self, node): | |
550 | self._in_docinfo = None |
|
550 | self._in_docinfo = None | |
551 | # NOTE nothing should be written before this |
|
551 | # NOTE nothing should be written before this | |
552 | self.append_header() |
|
552 | self.append_header() | |
553 |
|
553 | |||
554 | def visit_doctest_block(self, node): |
|
554 | def visit_doctest_block(self, node): | |
555 | self.body.append(self.defs['literal_block'][0]) |
|
555 | self.body.append(self.defs['literal_block'][0]) | |
556 | self._in_literal = True |
|
556 | self._in_literal = True | |
557 |
|
557 | |||
558 | def depart_doctest_block(self, node): |
|
558 | def depart_doctest_block(self, node): | |
559 | self._in_literal = False |
|
559 | self._in_literal = False | |
560 | self.body.append(self.defs['literal_block'][1]) |
|
560 | self.body.append(self.defs['literal_block'][1]) | |
561 |
|
561 | |||
562 | def visit_document(self, node): |
|
562 | def visit_document(self, node): | |
563 | # no blank line between comment and header. |
|
563 | # no blank line between comment and header. | |
564 | self.body.append(self.comment(self.document_start).rstrip()+'\n') |
|
564 | self.body.append(self.comment(self.document_start).rstrip()+'\n') | |
565 | # writing the header is postponed |
|
565 | # writing the header is postponed | |
566 | self.header_written = 0 |
|
566 | self.header_written = 0 | |
567 |
|
567 | |||
568 | def depart_document(self, node): |
|
568 | def depart_document(self, node): | |
569 | if self._docinfo['author']: |
|
569 | if self._docinfo['author']: | |
570 | self.body.append('.SH AUTHOR\n%s\n' |
|
570 | self.body.append('.SH AUTHOR\n%s\n' | |
571 | % ', '.join(self._docinfo['author'])) |
|
571 | % ', '.join(self._docinfo['author'])) | |
572 | skip = ('author', 'copyright', 'date', |
|
572 | skip = ('author', 'copyright', 'date', | |
573 | 'manual_group', 'manual_section', |
|
573 | 'manual_group', 'manual_section', | |
574 | 'subtitle', |
|
574 | 'subtitle', | |
575 | 'title', 'title_upper', 'version') |
|
575 | 'title', 'title_upper', 'version') | |
576 | for name in self._docinfo_keys: |
|
576 | for name in self._docinfo_keys: | |
577 | if name == 'address': |
|
577 | if name == 'address': | |
578 | self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % ( |
|
578 | self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % ( | |
579 | self.language.labels.get(name, name), |
|
579 | self.language.labels.get(name, name), | |
580 | self.defs['indent'][0] % 0, |
|
580 | self.defs['indent'][0] % 0, | |
581 | self.defs['indent'][0] % BLOCKQOUTE_INDENT, |
|
581 | self.defs['indent'][0] % BLOCKQOUTE_INDENT, | |
582 | self._docinfo[name], |
|
582 | self._docinfo[name], | |
583 | self.defs['indent'][1], |
|
583 | self.defs['indent'][1], | |
584 | self.defs['indent'][1])) |
|
584 | self.defs['indent'][1])) | |
585 | elif name not in skip: |
|
585 | elif name not in skip: | |
586 | if name in self._docinfo_names: |
|
586 | if name in self._docinfo_names: | |
587 | label = self._docinfo_names[name] |
|
587 | label = self._docinfo_names[name] | |
588 | else: |
|
588 | else: | |
589 | label = self.language.labels.get(name, name) |
|
589 | label = self.language.labels.get(name, name) | |
590 | self.body.append("\n%s: %s\n" % (label, self._docinfo[name])) |
|
590 | self.body.append("\n%s: %s\n" % (label, self._docinfo[name])) | |
591 | if self._docinfo['copyright']: |
|
591 | if self._docinfo['copyright']: | |
592 | self.body.append('.SH COPYRIGHT\n%s\n' |
|
592 | self.body.append('.SH COPYRIGHT\n%s\n' | |
593 | % self._docinfo['copyright']) |
|
593 | % self._docinfo['copyright']) | |
594 | self.body.append(self.comment( |
|
594 | self.body.append(self.comment( | |
595 | 'Generated by docutils manpage writer.\n')) |
|
595 | 'Generated by docutils manpage writer.\n')) | |
596 |
|
596 | |||
597 | def visit_emphasis(self, node): |
|
597 | def visit_emphasis(self, node): | |
598 | self.body.append(self.defs['emphasis'][0]) |
|
598 | self.body.append(self.defs['emphasis'][0]) | |
599 |
|
599 | |||
600 | def depart_emphasis(self, node): |
|
600 | def depart_emphasis(self, node): | |
601 | self.body.append(self.defs['emphasis'][1]) |
|
601 | self.body.append(self.defs['emphasis'][1]) | |
602 |
|
602 | |||
603 | def visit_entry(self, node): |
|
603 | def visit_entry(self, node): | |
604 | # a cell in a table row |
|
604 | # a cell in a table row | |
605 | if 'morerows' in node: |
|
605 | if 'morerows' in node: | |
606 | self.document.reporter.warning('"table row spanning" not supported', |
|
606 | self.document.reporter.warning('"table row spanning" not supported', | |
607 | base_node=node) |
|
607 | base_node=node) | |
608 | if 'morecols' in node: |
|
608 | if 'morecols' in node: | |
609 | self.document.reporter.warning( |
|
609 | self.document.reporter.warning( | |
610 | '"table cell spanning" not supported', base_node=node) |
|
610 | '"table cell spanning" not supported', base_node=node) | |
611 | self.context.append(len(self.body)) |
|
611 | self.context.append(len(self.body)) | |
612 |
|
612 | |||
613 | def depart_entry(self, node): |
|
613 | def depart_entry(self, node): | |
614 | start = self.context.pop() |
|
614 | start = self.context.pop() | |
615 | self._active_table.append_cell(self.body[start:]) |
|
615 | self._active_table.append_cell(self.body[start:]) | |
616 | del self.body[start:] |
|
616 | del self.body[start:] | |
617 |
|
617 | |||
618 | def visit_enumerated_list(self, node): |
|
618 | def visit_enumerated_list(self, node): | |
619 | self.list_start(node) |
|
619 | self.list_start(node) | |
620 |
|
620 | |||
621 | def depart_enumerated_list(self, node): |
|
621 | def depart_enumerated_list(self, node): | |
622 | self.list_end() |
|
622 | self.list_end() | |
623 |
|
623 | |||
624 | def visit_error(self, node): |
|
624 | def visit_error(self, node): | |
625 | self.visit_admonition(node, 'error') |
|
625 | self.visit_admonition(node, 'error') | |
626 |
|
626 | |||
627 | depart_error = depart_admonition |
|
627 | depart_error = depart_admonition | |
628 |
|
628 | |||
629 | def visit_field(self, node): |
|
629 | def visit_field(self, node): | |
630 | pass |
|
630 | pass | |
631 |
|
631 | |||
632 | def depart_field(self, node): |
|
632 | def depart_field(self, node): | |
633 | pass |
|
633 | pass | |
634 |
|
634 | |||
635 | def visit_field_body(self, node): |
|
635 | def visit_field_body(self, node): | |
636 | if self._in_docinfo: |
|
636 | if self._in_docinfo: | |
637 | name_normalized = self._field_name.lower().replace(" ","_") |
|
637 | name_normalized = self._field_name.lower().replace(" ","_") | |
638 | self._docinfo_names[name_normalized] = self._field_name |
|
638 | self._docinfo_names[name_normalized] = self._field_name | |
639 | self.visit_docinfo_item(node, name_normalized) |
|
639 | self.visit_docinfo_item(node, name_normalized) | |
640 | raise nodes.SkipNode |
|
640 | raise nodes.SkipNode | |
641 |
|
641 | |||
642 | def depart_field_body(self, node): |
|
642 | def depart_field_body(self, node): | |
643 | pass |
|
643 | pass | |
644 |
|
644 | |||
645 | def visit_field_list(self, node): |
|
645 | def visit_field_list(self, node): | |
646 | self.indent(FIELD_LIST_INDENT) |
|
646 | self.indent(FIELD_LIST_INDENT) | |
647 |
|
647 | |||
648 | def depart_field_list(self, node): |
|
648 | def depart_field_list(self, node): | |
649 | self.dedent() |
|
649 | self.dedent() | |
650 |
|
650 | |||
651 | def visit_field_name(self, node): |
|
651 | def visit_field_name(self, node): | |
652 | if self._in_docinfo: |
|
652 | if self._in_docinfo: | |
653 | self._field_name = node.astext() |
|
653 | self._field_name = node.astext() | |
654 | raise nodes.SkipNode |
|
654 | raise nodes.SkipNode | |
655 | else: |
|
655 | else: | |
656 | self.body.append(self.defs['field_name'][0]) |
|
656 | self.body.append(self.defs['field_name'][0]) | |
657 |
|
657 | |||
658 | def depart_field_name(self, node): |
|
658 | def depart_field_name(self, node): | |
659 | self.body.append(self.defs['field_name'][1]) |
|
659 | self.body.append(self.defs['field_name'][1]) | |
660 |
|
660 | |||
661 | def visit_figure(self, node): |
|
661 | def visit_figure(self, node): | |
662 | self.indent(2.5) |
|
662 | self.indent(2.5) | |
663 | self.indent(0) |
|
663 | self.indent(0) | |
664 |
|
664 | |||
665 | def depart_figure(self, node): |
|
665 | def depart_figure(self, node): | |
666 | self.dedent() |
|
666 | self.dedent() | |
667 | self.dedent() |
|
667 | self.dedent() | |
668 |
|
668 | |||
669 | def visit_footer(self, node): |
|
669 | def visit_footer(self, node): | |
670 | self.document.reporter.warning('"footer" not supported', |
|
670 | self.document.reporter.warning('"footer" not supported', | |
671 | base_node=node) |
|
671 | base_node=node) | |
672 |
|
672 | |||
673 | def depart_footer(self, node): |
|
673 | def depart_footer(self, node): | |
674 | pass |
|
674 | pass | |
675 |
|
675 | |||
676 | def visit_footnote(self, node): |
|
676 | def visit_footnote(self, node): | |
677 | num, text = node.astext().split(None, 1) |
|
677 | num, text = node.astext().split(None, 1) | |
678 | num = num.strip() |
|
678 | num = num.strip() | |
679 | self.body.append('.IP [%s] 5\n' % self.deunicode(num)) |
|
679 | self.body.append('.IP [%s] 5\n' % self.deunicode(num)) | |
680 |
|
680 | |||
681 | def depart_footnote(self, node): |
|
681 | def depart_footnote(self, node): | |
682 | pass |
|
682 | pass | |
683 |
|
683 | |||
684 | def footnote_backrefs(self, node): |
|
684 | def footnote_backrefs(self, node): | |
685 | self.document.reporter.warning('"footnote_backrefs" not supported', |
|
685 | self.document.reporter.warning('"footnote_backrefs" not supported', | |
686 | base_node=node) |
|
686 | base_node=node) | |
687 |
|
687 | |||
688 | def visit_footnote_reference(self, node): |
|
688 | def visit_footnote_reference(self, node): | |
689 | self.body.append('['+self.deunicode(node.astext())+']') |
|
689 | self.body.append('['+self.deunicode(node.astext())+']') | |
690 | raise nodes.SkipNode |
|
690 | raise nodes.SkipNode | |
691 |
|
691 | |||
692 | def depart_footnote_reference(self, node): |
|
692 | def depart_footnote_reference(self, node): | |
693 | pass |
|
693 | pass | |
694 |
|
694 | |||
695 | def visit_generated(self, node): |
|
695 | def visit_generated(self, node): | |
696 | pass |
|
696 | pass | |
697 |
|
697 | |||
698 | def depart_generated(self, node): |
|
698 | def depart_generated(self, node): | |
699 | pass |
|
699 | pass | |
700 |
|
700 | |||
701 | def visit_header(self, node): |
|
701 | def visit_header(self, node): | |
702 | raise NotImplementedError, node.astext() |
|
702 | raise NotImplementedError, node.astext() | |
703 |
|
703 | |||
704 | def depart_header(self, node): |
|
704 | def depart_header(self, node): | |
705 | pass |
|
705 | pass | |
706 |
|
706 | |||
707 | def visit_hint(self, node): |
|
707 | def visit_hint(self, node): | |
708 | self.visit_admonition(node, 'hint') |
|
708 | self.visit_admonition(node, 'hint') | |
709 |
|
709 | |||
710 | depart_hint = depart_admonition |
|
710 | depart_hint = depart_admonition | |
711 |
|
711 | |||
712 | def visit_subscript(self, node): |
|
712 | def visit_subscript(self, node): | |
713 | self.body.append('\\s-2\\d') |
|
713 | self.body.append('\\s-2\\d') | |
714 |
|
714 | |||
715 | def depart_subscript(self, node): |
|
715 | def depart_subscript(self, node): | |
716 | self.body.append('\\u\\s0') |
|
716 | self.body.append('\\u\\s0') | |
717 |
|
717 | |||
718 | def visit_superscript(self, node): |
|
718 | def visit_superscript(self, node): | |
719 | self.body.append('\\s-2\\u') |
|
719 | self.body.append('\\s-2\\u') | |
720 |
|
720 | |||
721 | def depart_superscript(self, node): |
|
721 | def depart_superscript(self, node): | |
722 | self.body.append('\\d\\s0') |
|
722 | self.body.append('\\d\\s0') | |
723 |
|
723 | |||
724 | def visit_attribution(self, node): |
|
724 | def visit_attribution(self, node): | |
725 | self.body.append('\\(em ') |
|
725 | self.body.append('\\(em ') | |
726 |
|
726 | |||
727 | def depart_attribution(self, node): |
|
727 | def depart_attribution(self, node): | |
728 | self.body.append('\n') |
|
728 | self.body.append('\n') | |
729 |
|
729 | |||
730 | def visit_image(self, node): |
|
730 | def visit_image(self, node): | |
731 | self.document.reporter.warning('"image" not supported', |
|
731 | self.document.reporter.warning('"image" not supported', | |
732 | base_node=node) |
|
732 | base_node=node) | |
733 | text = [] |
|
733 | text = [] | |
734 | if 'alt' in node.attributes: |
|
734 | if 'alt' in node.attributes: | |
735 | text.append(node.attributes['alt']) |
|
735 | text.append(node.attributes['alt']) | |
736 | if 'uri' in node.attributes: |
|
736 | if 'uri' in node.attributes: | |
737 | text.append(node.attributes['uri']) |
|
737 | text.append(node.attributes['uri']) | |
738 | self.body.append('[image: %s]\n' % ('/'.join(text))) |
|
738 | self.body.append('[image: %s]\n' % ('/'.join(text))) | |
739 | raise nodes.SkipNode |
|
739 | raise nodes.SkipNode | |
740 |
|
740 | |||
741 | def visit_important(self, node): |
|
741 | def visit_important(self, node): | |
742 | self.visit_admonition(node, 'important') |
|
742 | self.visit_admonition(node, 'important') | |
743 |
|
743 | |||
744 | depart_important = depart_admonition |
|
744 | depart_important = depart_admonition | |
745 |
|
745 | |||
746 | def visit_label(self, node): |
|
746 | def visit_label(self, node): | |
747 | # footnote and citation |
|
747 | # footnote and citation | |
748 | if (isinstance(node.parent, nodes.footnote) |
|
748 | if (isinstance(node.parent, nodes.footnote) | |
749 | or isinstance(node.parent, nodes.citation)): |
|
749 | or isinstance(node.parent, nodes.citation)): | |
750 | raise nodes.SkipNode |
|
750 | raise nodes.SkipNode | |
751 | self.document.reporter.warning('"unsupported "label"', |
|
751 | self.document.reporter.warning('"unsupported "label"', | |
752 | base_node=node) |
|
752 | base_node=node) | |
753 | self.body.append('[') |
|
753 | self.body.append('[') | |
754 |
|
754 | |||
755 | def depart_label(self, node): |
|
755 | def depart_label(self, node): | |
756 | self.body.append(']\n') |
|
756 | self.body.append(']\n') | |
757 |
|
757 | |||
758 | def visit_legend(self, node): |
|
758 | def visit_legend(self, node): | |
759 | pass |
|
759 | pass | |
760 |
|
760 | |||
761 | def depart_legend(self, node): |
|
761 | def depart_legend(self, node): | |
762 | pass |
|
762 | pass | |
763 |
|
763 | |||
764 | # WHAT should we use .INDENT, .UNINDENT ? |
|
764 | # WHAT should we use .INDENT, .UNINDENT ? | |
765 | def visit_line_block(self, node): |
|
765 | def visit_line_block(self, node): | |
766 | self._line_block += 1 |
|
766 | self._line_block += 1 | |
767 | if self._line_block == 1: |
|
767 | if self._line_block == 1: | |
768 | self.body.append('.sp\n') |
|
768 | self.body.append('.sp\n') | |
769 | self.body.append('.nf\n') |
|
769 | self.body.append('.nf\n') | |
770 | else: |
|
770 | else: | |
771 | self.body.append('.in +2\n') |
|
771 | self.body.append('.in +2\n') | |
772 |
|
772 | |||
773 | def depart_line_block(self, node): |
|
773 | def depart_line_block(self, node): | |
774 | self._line_block -= 1 |
|
774 | self._line_block -= 1 | |
775 | if self._line_block == 0: |
|
775 | if self._line_block == 0: | |
776 | self.body.append('.fi\n') |
|
776 | self.body.append('.fi\n') | |
777 | self.body.append('.sp\n') |
|
777 | self.body.append('.sp\n') | |
778 | else: |
|
778 | else: | |
779 | self.body.append('.in -2\n') |
|
779 | self.body.append('.in -2\n') | |
780 |
|
780 | |||
781 | def visit_line(self, node): |
|
781 | def visit_line(self, node): | |
782 | pass |
|
782 | pass | |
783 |
|
783 | |||
784 | def depart_line(self, node): |
|
784 | def depart_line(self, node): | |
785 | self.body.append('\n') |
|
785 | self.body.append('\n') | |
786 |
|
786 | |||
787 | def visit_list_item(self, node): |
|
787 | def visit_list_item(self, node): | |
788 | # man 7 man argues to use ".IP" instead of ".TP" |
|
788 | # man 7 man argues to use ".IP" instead of ".TP" | |
789 | self.body.append('.IP %s %d\n' % ( |
|
789 | self.body.append('.IP %s %d\n' % ( | |
790 | self._list_char[-1].next(), |
|
790 | self._list_char[-1].next(), | |
791 | self._list_char[-1].get_width(),)) |
|
791 | self._list_char[-1].get_width(),)) | |
792 |
|
792 | |||
793 | def depart_list_item(self, node): |
|
793 | def depart_list_item(self, node): | |
794 | pass |
|
794 | pass | |
795 |
|
795 | |||
796 | def visit_literal(self, node): |
|
796 | def visit_literal(self, node): | |
797 | self.body.append(self.defs['literal'][0]) |
|
797 | self.body.append(self.defs['literal'][0]) | |
798 |
|
798 | |||
799 | def depart_literal(self, node): |
|
799 | def depart_literal(self, node): | |
800 | self.body.append(self.defs['literal'][1]) |
|
800 | self.body.append(self.defs['literal'][1]) | |
801 |
|
801 | |||
802 | def visit_literal_block(self, node): |
|
802 | def visit_literal_block(self, node): | |
803 | self.body.append(self.defs['literal_block'][0]) |
|
803 | self.body.append(self.defs['literal_block'][0]) | |
804 | self._in_literal = True |
|
804 | self._in_literal = True | |
805 |
|
805 | |||
806 | def depart_literal_block(self, node): |
|
806 | def depart_literal_block(self, node): | |
807 | self._in_literal = False |
|
807 | self._in_literal = False | |
808 | self.body.append(self.defs['literal_block'][1]) |
|
808 | self.body.append(self.defs['literal_block'][1]) | |
809 |
|
809 | |||
810 | def visit_meta(self, node): |
|
810 | def visit_meta(self, node): | |
811 | raise NotImplementedError, node.astext() |
|
811 | raise NotImplementedError, node.astext() | |
812 |
|
812 | |||
813 | def depart_meta(self, node): |
|
813 | def depart_meta(self, node): | |
814 | pass |
|
814 | pass | |
815 |
|
815 | |||
816 | def visit_note(self, node): |
|
816 | def visit_note(self, node): | |
817 | self.visit_admonition(node, 'note') |
|
817 | self.visit_admonition(node, 'note') | |
818 |
|
818 | |||
819 | depart_note = depart_admonition |
|
819 | depart_note = depart_admonition | |
820 |
|
820 | |||
821 | def indent(self, by=0.5): |
|
821 | def indent(self, by=0.5): | |
822 | # if we are in a section ".SH" there already is a .RS |
|
822 | # if we are in a section ".SH" there already is a .RS | |
823 | step = self._indent[-1] |
|
823 | step = self._indent[-1] | |
824 | self._indent.append(by) |
|
824 | self._indent.append(by) | |
825 | self.body.append(self.defs['indent'][0] % step) |
|
825 | self.body.append(self.defs['indent'][0] % step) | |
826 |
|
826 | |||
827 | def dedent(self): |
|
827 | def dedent(self): | |
828 | self._indent.pop() |
|
828 | self._indent.pop() | |
829 | self.body.append(self.defs['indent'][1]) |
|
829 | self.body.append(self.defs['indent'][1]) | |
830 |
|
830 | |||
831 | def visit_option_list(self, node): |
|
831 | def visit_option_list(self, node): | |
832 | self.indent(OPTION_LIST_INDENT) |
|
832 | self.indent(OPTION_LIST_INDENT) | |
833 |
|
833 | |||
834 | def depart_option_list(self, node): |
|
834 | def depart_option_list(self, node): | |
835 | self.dedent() |
|
835 | self.dedent() | |
836 |
|
836 | |||
837 | def visit_option_list_item(self, node): |
|
837 | def visit_option_list_item(self, node): | |
838 | # one item of the list |
|
838 | # one item of the list | |
839 | self.body.append(self.defs['option_list_item'][0]) |
|
839 | self.body.append(self.defs['option_list_item'][0]) | |
840 |
|
840 | |||
841 | def depart_option_list_item(self, node): |
|
841 | def depart_option_list_item(self, node): | |
842 | self.body.append(self.defs['option_list_item'][1]) |
|
842 | self.body.append(self.defs['option_list_item'][1]) | |
843 |
|
843 | |||
844 | def visit_option_group(self, node): |
|
844 | def visit_option_group(self, node): | |
845 | # as one option could have several forms it is a group |
|
845 | # as one option could have several forms it is a group | |
846 | # options without parameter bold only, .B, -v |
|
846 | # options without parameter bold only, .B, -v | |
847 | # options with parameter bold italic, .BI, -f file |
|
847 | # options with parameter bold italic, .BI, -f file | |
848 | # |
|
848 | # | |
849 | # we do not know if .B or .BI |
|
849 | # we do not know if .B or .BI | |
850 | self.context.append('.B') # blind guess |
|
850 | self.context.append('.B') # blind guess | |
851 | self.context.append(len(self.body)) # to be able to insert later |
|
851 | self.context.append(len(self.body)) # to be able to insert later | |
852 | self.context.append(0) # option counter |
|
852 | self.context.append(0) # option counter | |
853 |
|
853 | |||
854 | def depart_option_group(self, node): |
|
854 | def depart_option_group(self, node): | |
855 | self.context.pop() # the counter |
|
855 | self.context.pop() # the counter | |
856 | start_position = self.context.pop() |
|
856 | start_position = self.context.pop() | |
857 | text = self.body[start_position:] |
|
857 | text = self.body[start_position:] | |
858 | del self.body[start_position:] |
|
858 | del self.body[start_position:] | |
859 | self.body.append('%s%s\n' % (self.context.pop(), ''.join(text))) |
|
859 | self.body.append('%s%s\n' % (self.context.pop(), ''.join(text))) | |
860 |
|
860 | |||
861 | def visit_option(self, node): |
|
861 | def visit_option(self, node): | |
862 | # each form of the option will be presented separately |
|
862 | # each form of the option will be presented separately | |
863 | if self.context[-1] > 0: |
|
863 | if self.context[-1] > 0: | |
864 | self.body.append(', ') |
|
864 | self.body.append(', ') | |
865 | if self.context[-3] == '.BI': |
|
865 | if self.context[-3] == '.BI': | |
866 | self.body.append('\\') |
|
866 | self.body.append('\\') | |
867 | self.body.append(' ') |
|
867 | self.body.append(' ') | |
868 |
|
868 | |||
869 | def depart_option(self, node): |
|
869 | def depart_option(self, node): | |
870 | self.context[-1] += 1 |
|
870 | self.context[-1] += 1 | |
871 |
|
871 | |||
872 | def visit_option_string(self, node): |
|
872 | def visit_option_string(self, node): | |
873 | # do not know if .B or .BI |
|
873 | # do not know if .B or .BI | |
874 | pass |
|
874 | pass | |
875 |
|
875 | |||
876 | def depart_option_string(self, node): |
|
876 | def depart_option_string(self, node): | |
877 | pass |
|
877 | pass | |
878 |
|
878 | |||
879 | def visit_option_argument(self, node): |
|
879 | def visit_option_argument(self, node): | |
880 | self.context[-3] = '.BI' # bold/italic alternate |
|
880 | self.context[-3] = '.BI' # bold/italic alternate | |
881 | if node['delimiter'] != ' ': |
|
881 | if node['delimiter'] != ' ': | |
882 | self.body.append('\\fB%s ' % node['delimiter']) |
|
882 | self.body.append('\\fB%s ' % node['delimiter']) | |
883 | elif self.body[len(self.body) - 1].endswith('='): |
|
883 | elif self.body[len(self.body) - 1].endswith('='): | |
884 | # a blank only means no blank in output, just changing font |
|
884 | # a blank only means no blank in output, just changing font | |
885 | self.body.append(' ') |
|
885 | self.body.append(' ') | |
886 | else: |
|
886 | else: | |
887 | # blank backslash blank, switch font then a blank |
|
887 | # blank backslash blank, switch font then a blank | |
888 | self.body.append(' \\ ') |
|
888 | self.body.append(' \\ ') | |
889 |
|
889 | |||
890 | def depart_option_argument(self, node): |
|
890 | def depart_option_argument(self, node): | |
891 | pass |
|
891 | pass | |
892 |
|
892 | |||
893 | def visit_organization(self, node): |
|
893 | def visit_organization(self, node): | |
894 | self.visit_docinfo_item(node, 'organization') |
|
894 | self.visit_docinfo_item(node, 'organization') | |
895 |
|
895 | |||
896 | def depart_organization(self, node): |
|
896 | def depart_organization(self, node): | |
897 | pass |
|
897 | pass | |
898 |
|
898 | |||
899 | def visit_paragraph(self, node): |
|
899 | def visit_paragraph(self, node): | |
900 | # ``.PP`` : Start standard indented paragraph. |
|
900 | # ``.PP`` : Start standard indented paragraph. | |
901 | # ``.LP`` : Start block paragraph, all except the first. |
|
901 | # ``.LP`` : Start block paragraph, all except the first. | |
902 | # ``.P [type]`` : Start paragraph type. |
|
902 | # ``.P [type]`` : Start paragraph type. | |
903 | # NOTE don't use paragraph starts because they reset indentation. |
|
903 | # NOTE don't use paragraph starts because they reset indentation. | |
904 | # ``.sp`` is only vertical space |
|
904 | # ``.sp`` is only vertical space | |
905 | self.ensure_eol() |
|
905 | self.ensure_eol() | |
906 | self.body.append('.sp\n') |
|
906 | self.body.append('.sp\n') | |
907 |
|
907 | |||
908 | def depart_paragraph(self, node): |
|
908 | def depart_paragraph(self, node): | |
909 | self.body.append('\n') |
|
909 | self.body.append('\n') | |
910 |
|
910 | |||
911 | def visit_problematic(self, node): |
|
911 | def visit_problematic(self, node): | |
912 | self.body.append(self.defs['problematic'][0]) |
|
912 | self.body.append(self.defs['problematic'][0]) | |
913 |
|
913 | |||
914 | def depart_problematic(self, node): |
|
914 | def depart_problematic(self, node): | |
915 | self.body.append(self.defs['problematic'][1]) |
|
915 | self.body.append(self.defs['problematic'][1]) | |
916 |
|
916 | |||
917 | def visit_raw(self, node): |
|
917 | def visit_raw(self, node): | |
918 | if node.get('format') == 'manpage': |
|
918 | if node.get('format') == 'manpage': | |
919 | self.body.append(node.astext() + "\n") |
|
919 | self.body.append(node.astext() + "\n") | |
920 | # Keep non-manpage raw text out of output: |
|
920 | # Keep non-manpage raw text out of output: | |
921 | raise nodes.SkipNode |
|
921 | raise nodes.SkipNode | |
922 |
|
922 | |||
923 | def visit_reference(self, node): |
|
923 | def visit_reference(self, node): | |
924 | """E.g. link or email address.""" |
|
924 | """E.g. link or email address.""" | |
925 | self.body.append(self.defs['reference'][0]) |
|
925 | self.body.append(self.defs['reference'][0]) | |
926 |
|
926 | |||
927 | def depart_reference(self, node): |
|
927 | def depart_reference(self, node): | |
928 | self.body.append(self.defs['reference'][1]) |
|
928 | self.body.append(self.defs['reference'][1]) | |
929 |
|
929 | |||
930 | def visit_revision(self, node): |
|
930 | def visit_revision(self, node): | |
931 | self.visit_docinfo_item(node, 'revision') |
|
931 | self.visit_docinfo_item(node, 'revision') | |
932 |
|
932 | |||
933 | depart_revision = depart_docinfo_item |
|
933 | depart_revision = depart_docinfo_item | |
934 |
|
934 | |||
935 | def visit_row(self, node): |
|
935 | def visit_row(self, node): | |
936 | self._active_table.new_row() |
|
936 | self._active_table.new_row() | |
937 |
|
937 | |||
938 | def depart_row(self, node): |
|
938 | def depart_row(self, node): | |
939 | pass |
|
939 | pass | |
940 |
|
940 | |||
941 | def visit_section(self, node): |
|
941 | def visit_section(self, node): | |
942 | self.section_level += 1 |
|
942 | self.section_level += 1 | |
943 |
|
943 | |||
944 | def depart_section(self, node): |
|
944 | def depart_section(self, node): | |
945 | self.section_level -= 1 |
|
945 | self.section_level -= 1 | |
946 |
|
946 | |||
947 | def visit_status(self, node): |
|
947 | def visit_status(self, node): | |
948 | self.visit_docinfo_item(node, 'status') |
|
948 | self.visit_docinfo_item(node, 'status') | |
949 |
|
949 | |||
950 | depart_status = depart_docinfo_item |
|
950 | depart_status = depart_docinfo_item | |
951 |
|
951 | |||
952 | def visit_strong(self, node): |
|
952 | def visit_strong(self, node): | |
953 | self.body.append(self.defs['strong'][0]) |
|
953 | self.body.append(self.defs['strong'][0]) | |
954 |
|
954 | |||
955 | def depart_strong(self, node): |
|
955 | def depart_strong(self, node): | |
956 | self.body.append(self.defs['strong'][1]) |
|
956 | self.body.append(self.defs['strong'][1]) | |
957 |
|
957 | |||
958 | def visit_substitution_definition(self, node): |
|
958 | def visit_substitution_definition(self, node): | |
959 | """Internal only.""" |
|
959 | """Internal only.""" | |
960 | raise nodes.SkipNode |
|
960 | raise nodes.SkipNode | |
961 |
|
961 | |||
962 | def visit_substitution_reference(self, node): |
|
962 | def visit_substitution_reference(self, node): | |
963 | self.document.reporter.warning('"substitution_reference" not supported', |
|
963 | self.document.reporter.warning('"substitution_reference" not supported', | |
964 | base_node=node) |
|
964 | base_node=node) | |
965 |
|
965 | |||
966 | def visit_subtitle(self, node): |
|
966 | def visit_subtitle(self, node): | |
967 | if isinstance(node.parent, nodes.sidebar): |
|
967 | if isinstance(node.parent, nodes.sidebar): | |
968 | self.body.append(self.defs['strong'][0]) |
|
968 | self.body.append(self.defs['strong'][0]) | |
969 | elif isinstance(node.parent, nodes.document): |
|
969 | elif isinstance(node.parent, nodes.document): | |
970 | self.visit_docinfo_item(node, 'subtitle') |
|
970 | self.visit_docinfo_item(node, 'subtitle') | |
971 | elif isinstance(node.parent, nodes.section): |
|
971 | elif isinstance(node.parent, nodes.section): | |
972 | self.body.append(self.defs['strong'][0]) |
|
972 | self.body.append(self.defs['strong'][0]) | |
973 |
|
973 | |||
974 | def depart_subtitle(self, node): |
|
974 | def depart_subtitle(self, node): | |
975 | # document subtitle calls SkipNode |
|
975 | # document subtitle calls SkipNode | |
976 | self.body.append(self.defs['strong'][1]+'\n.PP\n') |
|
976 | self.body.append(self.defs['strong'][1]+'\n.PP\n') | |
977 |
|
977 | |||
978 | def visit_system_message(self, node): |
|
978 | def visit_system_message(self, node): | |
979 | # TODO add report_level |
|
979 | # TODO add report_level | |
980 | #if node['level'] < self.document.reporter['writer'].report_level: |
|
980 | #if node['level'] < self.document.reporter['writer'].report_level: | |
981 | # Level is too low to display: |
|
981 | # Level is too low to display: | |
982 | # raise nodes.SkipNode |
|
982 | # raise nodes.SkipNode | |
983 | attr = {} |
|
983 | attr = {} | |
984 | backref_text = '' |
|
|||
985 | if node.hasattr('id'): |
|
984 | if node.hasattr('id'): | |
986 | attr['name'] = node['id'] |
|
985 | attr['name'] = node['id'] | |
987 | if node.hasattr('line'): |
|
986 | if node.hasattr('line'): | |
988 | line = ', line %s' % node['line'] |
|
987 | line = ', line %s' % node['line'] | |
989 | else: |
|
988 | else: | |
990 | line = '' |
|
989 | line = '' | |
991 | self.body.append('.IP "System Message: %s/%s (%s:%s)"\n' |
|
990 | self.body.append('.IP "System Message: %s/%s (%s:%s)"\n' | |
992 | % (node['type'], node['level'], node['source'], line)) |
|
991 | % (node['type'], node['level'], node['source'], line)) | |
993 |
|
992 | |||
994 | def depart_system_message(self, node): |
|
993 | def depart_system_message(self, node): | |
995 | pass |
|
994 | pass | |
996 |
|
995 | |||
997 | def visit_table(self, node): |
|
996 | def visit_table(self, node): | |
998 | self._active_table = Table() |
|
997 | self._active_table = Table() | |
999 |
|
998 | |||
1000 | def depart_table(self, node): |
|
999 | def depart_table(self, node): | |
1001 | self.ensure_eol() |
|
1000 | self.ensure_eol() | |
1002 | self.body.extend(self._active_table.as_list()) |
|
1001 | self.body.extend(self._active_table.as_list()) | |
1003 | self._active_table = None |
|
1002 | self._active_table = None | |
1004 |
|
1003 | |||
1005 | def visit_target(self, node): |
|
1004 | def visit_target(self, node): | |
1006 | # targets are in-document hyper targets, without any use for man-pages. |
|
1005 | # targets are in-document hyper targets, without any use for man-pages. | |
1007 | raise nodes.SkipNode |
|
1006 | raise nodes.SkipNode | |
1008 |
|
1007 | |||
1009 | def visit_tbody(self, node): |
|
1008 | def visit_tbody(self, node): | |
1010 | pass |
|
1009 | pass | |
1011 |
|
1010 | |||
1012 | def depart_tbody(self, node): |
|
1011 | def depart_tbody(self, node): | |
1013 | pass |
|
1012 | pass | |
1014 |
|
1013 | |||
1015 | def visit_term(self, node): |
|
1014 | def visit_term(self, node): | |
1016 | self.body.append(self.defs['term'][0]) |
|
1015 | self.body.append(self.defs['term'][0]) | |
1017 |
|
1016 | |||
1018 | def depart_term(self, node): |
|
1017 | def depart_term(self, node): | |
1019 | self.body.append(self.defs['term'][1]) |
|
1018 | self.body.append(self.defs['term'][1]) | |
1020 |
|
1019 | |||
1021 | def visit_tgroup(self, node): |
|
1020 | def visit_tgroup(self, node): | |
1022 | pass |
|
1021 | pass | |
1023 |
|
1022 | |||
1024 | def depart_tgroup(self, node): |
|
1023 | def depart_tgroup(self, node): | |
1025 | pass |
|
1024 | pass | |
1026 |
|
1025 | |||
1027 | def visit_thead(self, node): |
|
1026 | def visit_thead(self, node): | |
1028 | # MAYBE double line '=' |
|
1027 | # MAYBE double line '=' | |
1029 | pass |
|
1028 | pass | |
1030 |
|
1029 | |||
1031 | def depart_thead(self, node): |
|
1030 | def depart_thead(self, node): | |
1032 | # MAYBE double line '=' |
|
1031 | # MAYBE double line '=' | |
1033 | pass |
|
1032 | pass | |
1034 |
|
1033 | |||
1035 | def visit_tip(self, node): |
|
1034 | def visit_tip(self, node): | |
1036 | self.visit_admonition(node, 'tip') |
|
1035 | self.visit_admonition(node, 'tip') | |
1037 |
|
1036 | |||
1038 | depart_tip = depart_admonition |
|
1037 | depart_tip = depart_admonition | |
1039 |
|
1038 | |||
1040 | def visit_title(self, node): |
|
1039 | def visit_title(self, node): | |
1041 | if isinstance(node.parent, nodes.topic): |
|
1040 | if isinstance(node.parent, nodes.topic): | |
1042 | self.body.append(self.defs['topic-title'][0]) |
|
1041 | self.body.append(self.defs['topic-title'][0]) | |
1043 | elif isinstance(node.parent, nodes.sidebar): |
|
1042 | elif isinstance(node.parent, nodes.sidebar): | |
1044 | self.body.append(self.defs['sidebar-title'][0]) |
|
1043 | self.body.append(self.defs['sidebar-title'][0]) | |
1045 | elif isinstance(node.parent, nodes.admonition): |
|
1044 | elif isinstance(node.parent, nodes.admonition): | |
1046 | self.body.append('.IP "') |
|
1045 | self.body.append('.IP "') | |
1047 | elif self.section_level == 0: |
|
1046 | elif self.section_level == 0: | |
1048 | self._docinfo['title'] = node.astext() |
|
1047 | self._docinfo['title'] = node.astext() | |
1049 | # document title for .TH |
|
1048 | # document title for .TH | |
1050 | self._docinfo['title_upper'] = node.astext().upper() |
|
1049 | self._docinfo['title_upper'] = node.astext().upper() | |
1051 | raise nodes.SkipNode |
|
1050 | raise nodes.SkipNode | |
1052 | elif self.section_level == 1: |
|
1051 | elif self.section_level == 1: | |
1053 | self.body.append('.SH ') |
|
1052 | self.body.append('.SH ') | |
1054 | for n in node.traverse(nodes.Text): |
|
1053 | for n in node.traverse(nodes.Text): | |
1055 | n.parent.replace(n, nodes.Text(n.astext().upper())) |
|
1054 | n.parent.replace(n, nodes.Text(n.astext().upper())) | |
1056 | else: |
|
1055 | else: | |
1057 | self.body.append('.SS ') |
|
1056 | self.body.append('.SS ') | |
1058 |
|
1057 | |||
1059 | def depart_title(self, node): |
|
1058 | def depart_title(self, node): | |
1060 | if isinstance(node.parent, nodes.admonition): |
|
1059 | if isinstance(node.parent, nodes.admonition): | |
1061 | self.body.append('"') |
|
1060 | self.body.append('"') | |
1062 | self.body.append('\n') |
|
1061 | self.body.append('\n') | |
1063 |
|
1062 | |||
1064 | def visit_title_reference(self, node): |
|
1063 | def visit_title_reference(self, node): | |
1065 | """inline citation reference""" |
|
1064 | """inline citation reference""" | |
1066 | self.body.append(self.defs['title_reference'][0]) |
|
1065 | self.body.append(self.defs['title_reference'][0]) | |
1067 |
|
1066 | |||
1068 | def depart_title_reference(self, node): |
|
1067 | def depart_title_reference(self, node): | |
1069 | self.body.append(self.defs['title_reference'][1]) |
|
1068 | self.body.append(self.defs['title_reference'][1]) | |
1070 |
|
1069 | |||
1071 | def visit_topic(self, node): |
|
1070 | def visit_topic(self, node): | |
1072 | pass |
|
1071 | pass | |
1073 |
|
1072 | |||
1074 | def depart_topic(self, node): |
|
1073 | def depart_topic(self, node): | |
1075 | pass |
|
1074 | pass | |
1076 |
|
1075 | |||
1077 | def visit_sidebar(self, node): |
|
1076 | def visit_sidebar(self, node): | |
1078 | pass |
|
1077 | pass | |
1079 |
|
1078 | |||
1080 | def depart_sidebar(self, node): |
|
1079 | def depart_sidebar(self, node): | |
1081 | pass |
|
1080 | pass | |
1082 |
|
1081 | |||
1083 | def visit_rubric(self, node): |
|
1082 | def visit_rubric(self, node): | |
1084 | pass |
|
1083 | pass | |
1085 |
|
1084 | |||
1086 | def depart_rubric(self, node): |
|
1085 | def depart_rubric(self, node): | |
1087 | pass |
|
1086 | pass | |
1088 |
|
1087 | |||
1089 | def visit_transition(self, node): |
|
1088 | def visit_transition(self, node): | |
1090 | # .PP Begin a new paragraph and reset prevailing indent. |
|
1089 | # .PP Begin a new paragraph and reset prevailing indent. | |
1091 | # .sp N leaves N lines of blank space. |
|
1090 | # .sp N leaves N lines of blank space. | |
1092 | # .ce centers the next line |
|
1091 | # .ce centers the next line | |
1093 | self.body.append('\n.sp\n.ce\n----\n') |
|
1092 | self.body.append('\n.sp\n.ce\n----\n') | |
1094 |
|
1093 | |||
1095 | def depart_transition(self, node): |
|
1094 | def depart_transition(self, node): | |
1096 | self.body.append('\n.ce 0\n.sp\n') |
|
1095 | self.body.append('\n.ce 0\n.sp\n') | |
1097 |
|
1096 | |||
1098 | def visit_version(self, node): |
|
1097 | def visit_version(self, node): | |
1099 | self.visit_docinfo_item(node, 'version') |
|
1098 | self.visit_docinfo_item(node, 'version') | |
1100 |
|
1099 | |||
1101 | def visit_warning(self, node): |
|
1100 | def visit_warning(self, node): | |
1102 | self.visit_admonition(node, 'warning') |
|
1101 | self.visit_admonition(node, 'warning') | |
1103 |
|
1102 | |||
1104 | depart_warning = depart_admonition |
|
1103 | depart_warning = depart_admonition | |
1105 |
|
1104 | |||
1106 | def unimplemented_visit(self, node): |
|
1105 | def unimplemented_visit(self, node): | |
1107 | raise NotImplementedError('visiting unimplemented node type: %s' |
|
1106 | raise NotImplementedError('visiting unimplemented node type: %s' | |
1108 | % node.__class__.__name__) |
|
1107 | % node.__class__.__name__) | |
1109 |
|
1108 | |||
1110 | # vim: set fileencoding=utf-8 et ts=4 ai : |
|
1109 | # vim: set fileencoding=utf-8 et ts=4 ai : |
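The visit_*/depart_* methods above are driven by docutils' node-visitor machinery: a Writer walks the parsed document tree and the translator appends roff markup to self.body as each node is entered and left. As a rough, hypothetical sketch (not part of this changeset), a minimal Writer wrapper could look like the following; the names ManualPageWriter, Translator and astext() are assumptions here, since the corresponding class definitions fall outside this hunk.

    # Hypothetical sketch only: wiring a docutils Writer to a translator like
    # the one above. Translator and astext() are assumed names, not from this diff.
    from docutils import core, writers

    class ManualPageWriter(writers.Writer):
        supported = ('manpage',)

        def translate(self):
            # walkabout() calls visit_<node>/depart_<node> on the visitor
            # for every node in the parsed document tree
            visitor = Translator(self.document)
            self.document.walkabout(visitor)
            self.output = visitor.astext()

    if __name__ == '__main__':
        rst = "Title\n=====\n\nSome *emphasis* and a ``literal``.\n"
        # publish_string parses the reST source and renders it with our writer
        print core.publish_string(rst, writer=ManualPageWriter())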
@@ -1,321 +1,321 b'' | |||||
1 | import os, stat, socket |
|
1 | import os, stat, socket | |
2 | import re |
|
2 | import re | |
3 | import sys |
|
3 | import sys | |
4 | import tempfile |
|
4 | import tempfile | |
5 |
|
5 | |||
6 | tempprefix = 'hg-hghave-' |
|
6 | tempprefix = 'hg-hghave-' | |
7 |
|
7 | |||
8 | def matchoutput(cmd, regexp, ignorestatus=False): |
|
8 | def matchoutput(cmd, regexp, ignorestatus=False): | |
9 | """Return True if cmd executes successfully and its output |
|
9 | """Return True if cmd executes successfully and its output | |
10 | is matched by the supplied regular expression. |
|
10 | is matched by the supplied regular expression. | |
11 | """ |
|
11 | """ | |
12 | r = re.compile(regexp) |
|
12 | r = re.compile(regexp) | |
13 | fh = os.popen(cmd) |
|
13 | fh = os.popen(cmd) | |
14 | s = fh.read() |
|
14 | s = fh.read() | |
15 | try: |
|
15 | try: | |
16 | ret = fh.close() |
|
16 | ret = fh.close() | |
17 | except IOError: |
|
17 | except IOError: | |
18 | # Happens in the Windows test environment |
|
18 | # Happens in the Windows test environment | |
19 | ret = 1 |
|
19 | ret = 1 | |
20 | return (ignorestatus or ret is None) and r.search(s) |
|
20 | return (ignorestatus or ret is None) and r.search(s) | |
21 |
|
21 | |||
22 | def has_baz(): |
|
22 | def has_baz(): | |
23 | return matchoutput('baz --version 2>&1', r'baz Bazaar version') |
|
23 | return matchoutput('baz --version 2>&1', r'baz Bazaar version') | |
24 |
|
24 | |||
25 | def has_bzr(): |
|
25 | def has_bzr(): | |
26 | try: |
|
26 | try: | |
27 | import bzrlib |
|
27 | import bzrlib | |
28 | return bzrlib.__doc__ is not None |
|
28 | return bzrlib.__doc__ is not None | |
29 | except ImportError: |
|
29 | except ImportError: | |
30 | return False |
|
30 | return False | |
31 |
|
31 | |||
32 | def has_bzr114(): |
|
32 | def has_bzr114(): | |
33 | try: |
|
33 | try: | |
34 | import bzrlib |
|
34 | import bzrlib | |
35 | return (bzrlib.__doc__ is not None |
|
35 | return (bzrlib.__doc__ is not None | |
36 | and bzrlib.version_info[:2] >= (1, 14)) |
|
36 | and bzrlib.version_info[:2] >= (1, 14)) | |
37 | except ImportError: |
|
37 | except ImportError: | |
38 | return False |
|
38 | return False | |
39 |
|
39 | |||
40 | def has_cvs(): |
|
40 | def has_cvs(): | |
41 | re = r'Concurrent Versions System.*?server' |
|
41 | re = r'Concurrent Versions System.*?server' | |
42 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
42 | return matchoutput('cvs --version 2>&1', re) and not has_msys() | |
43 |
|
43 | |||
44 | def has_cvs112(): |
|
44 | def has_cvs112(): | |
45 | re = r'Concurrent Versions System \(CVS\) 1.12.*?server' |
|
45 | re = r'Concurrent Versions System \(CVS\) 1.12.*?server' | |
46 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
46 | return matchoutput('cvs --version 2>&1', re) and not has_msys() | |
47 |
|
47 | |||
48 | def has_darcs(): |
|
48 | def has_darcs(): | |
49 | return matchoutput('darcs --version', r'2\.[2-9]', True) |
|
49 | return matchoutput('darcs --version', r'2\.[2-9]', True) | |
50 |
|
50 | |||
51 | def has_mtn(): |
|
51 | def has_mtn(): | |
52 | return matchoutput('mtn --version', r'monotone', True) and not matchoutput( |
|
52 | return matchoutput('mtn --version', r'monotone', True) and not matchoutput( | |
53 | 'mtn --version', r'monotone 0\.', True) |
|
53 | 'mtn --version', r'monotone 0\.', True) | |
54 |
|
54 | |||
55 | def has_eol_in_paths(): |
|
55 | def has_eol_in_paths(): | |
56 | try: |
|
56 | try: | |
57 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r') |
|
57 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r') | |
58 | os.close(fd) |
|
58 | os.close(fd) | |
59 | os.remove(path) |
|
59 | os.remove(path) | |
60 | return True |
|
60 | return True | |
61 | except (IOError, OSError): |
|
61 | except (IOError, OSError): | |
62 | return False |
|
62 | return False | |
63 |
|
63 | |||
64 | def has_executablebit(): |
|
64 | def has_executablebit(): | |
65 | try: |
|
65 | try: | |
66 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
66 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | |
67 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
67 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
68 | try: |
|
68 | try: | |
69 | os.close(fh) |
|
69 | os.close(fh) | |
70 | m = os.stat(fn).st_mode & 0777 |
|
70 | m = os.stat(fn).st_mode & 0777 | |
71 | new_file_has_exec = m & EXECFLAGS |
|
71 | new_file_has_exec = m & EXECFLAGS | |
72 | os.chmod(fn, m ^ EXECFLAGS) |
|
72 | os.chmod(fn, m ^ EXECFLAGS) | |
73 | exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m) |
|
73 | exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m) | |
74 | finally: |
|
74 | finally: | |
75 | os.unlink(fn) |
|
75 | os.unlink(fn) | |
76 | except (IOError, OSError): |
|
76 | except (IOError, OSError): | |
77 | # we don't care, the user probably won't be able to commit anyway |
|
77 | # we don't care, the user probably won't be able to commit anyway | |
78 | return False |
|
78 | return False | |
79 | return not (new_file_has_exec or exec_flags_cannot_flip) |
|
79 | return not (new_file_has_exec or exec_flags_cannot_flip) | |
80 |
|
80 | |||
81 | def has_icasefs(): |
|
81 | def has_icasefs(): | |
82 | # Stolen from mercurial.util |
|
82 | # Stolen from mercurial.util | |
83 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
83 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
84 | os.close(fd) |
|
84 | os.close(fd) | |
85 | try: |
|
85 | try: | |
86 | s1 = os.stat(path) |
|
86 | s1 = os.stat(path) | |
87 | d, b = os.path.split(path) |
|
87 | d, b = os.path.split(path) | |
88 | p2 = os.path.join(d, b.upper()) |
|
88 | p2 = os.path.join(d, b.upper()) | |
89 | if path == p2: |
|
89 | if path == p2: | |
90 | p2 = os.path.join(d, b.lower()) |
|
90 | p2 = os.path.join(d, b.lower()) | |
91 | try: |
|
91 | try: | |
92 | s2 = os.stat(p2) |
|
92 | s2 = os.stat(p2) | |
93 | return s2 == s1 |
|
93 | return s2 == s1 | |
94 | except OSError: |
|
94 | except OSError: | |
95 | return False |
|
95 | return False | |
96 | finally: |
|
96 | finally: | |
97 | os.remove(path) |
|
97 | os.remove(path) | |
98 |
|
98 | |||
99 | def has_inotify(): |
|
99 | def has_inotify(): | |
100 | try: |
|
100 | try: | |
101 | import hgext.inotify.linux.watcher |
|
101 | import hgext.inotify.linux.watcher | |
102 | except ImportError: |
|
102 | except ImportError: | |
103 | return False |
|
103 | return False | |
104 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
104 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
105 | sock = socket.socket(socket.AF_UNIX) |
|
105 | sock = socket.socket(socket.AF_UNIX) | |
106 | try: |
|
106 | try: | |
107 | sock.bind(name) |
|
107 | sock.bind(name) | |
108 | except socket.error |
|
108 | except socket.error: | |
109 | return False |
|
109 | return False | |
110 | sock.close() |
|
110 | sock.close() | |
111 | os.unlink(name) |
|
111 | os.unlink(name) | |
112 | return True |
|
112 | return True | |
113 |
|
113 | |||
114 | def has_fifo(): |
|
114 | def has_fifo(): | |
115 | if getattr(os, "mkfifo", None) is None: |
|
115 | if getattr(os, "mkfifo", None) is None: | |
116 | return False |
|
116 | return False | |
117 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
117 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
118 | try: |
|
118 | try: | |
119 | os.mkfifo(name) |
|
119 | os.mkfifo(name) | |
120 | os.unlink(name) |
|
120 | os.unlink(name) | |
121 | return True |
|
121 | return True | |
122 | except OSError: |
|
122 | except OSError: | |
123 | return False |
|
123 | return False | |
124 |
|
124 | |||
125 | def has_killdaemons(): |
|
125 | def has_killdaemons(): | |
126 | return True |
|
126 | return True | |
127 |
|
127 | |||
128 | def has_cacheable_fs(): |
|
128 | def has_cacheable_fs(): | |
129 | from mercurial import util |
|
129 | from mercurial import util | |
130 |
|
130 | |||
131 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
131 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
132 | os.close(fd) |
|
132 | os.close(fd) | |
133 | try: |
|
133 | try: | |
134 | return util.cachestat(path).cacheable() |
|
134 | return util.cachestat(path).cacheable() | |
135 | finally: |
|
135 | finally: | |
136 | os.remove(path) |
|
136 | os.remove(path) | |
137 |
|
137 | |||
138 | def has_lsprof(): |
|
138 | def has_lsprof(): | |
139 | try: |
|
139 | try: | |
140 | import _lsprof |
|
140 | import _lsprof | |
141 | return True |
|
141 | return True | |
142 | except ImportError: |
|
142 | except ImportError: | |
143 | return False |
|
143 | return False | |
144 |
|
144 | |||
145 | def has_gettext(): |
|
145 | def has_gettext(): | |
146 | return matchoutput('msgfmt --version', 'GNU gettext-tools') |
|
146 | return matchoutput('msgfmt --version', 'GNU gettext-tools') | |
147 |
|
147 | |||
148 | def has_git(): |
|
148 | def has_git(): | |
149 | return matchoutput('git --version 2>&1', r'^git version') |
|
149 | return matchoutput('git --version 2>&1', r'^git version') | |
150 |
|
150 | |||
151 | def has_docutils(): |
|
151 | def has_docutils(): | |
152 | try: |
|
152 | try: | |
153 | from docutils.core import publish_cmdline |
|
153 | from docutils.core import publish_cmdline | |
154 | return True |
|
154 | return True | |
155 | except ImportError: |
|
155 | except ImportError: | |
156 | return False |
|
156 | return False | |
157 |
|
157 | |||
158 | def getsvnversion(): |
|
158 | def getsvnversion(): | |
159 | m = matchoutput('svn --version --quiet 2>&1', r'^(\d+)\.(\d+)') |
|
159 | m = matchoutput('svn --version --quiet 2>&1', r'^(\d+)\.(\d+)') | |
160 | if not m: |
|
160 | if not m: | |
161 | return (0, 0) |
|
161 | return (0, 0) | |
162 | return (int(m.group(1)), int(m.group(2))) |
|
162 | return (int(m.group(1)), int(m.group(2))) | |
163 |
|
163 | |||
164 | def has_svn15(): |
|
164 | def has_svn15(): | |
165 | return getsvnversion() >= (1, 5) |
|
165 | return getsvnversion() >= (1, 5) | |
166 |
|
166 | |||
167 | def has_svn13(): |
|
167 | def has_svn13(): | |
168 | return getsvnversion() >= (1, 3) |
|
168 | return getsvnversion() >= (1, 3) | |
169 |
|
169 | |||
170 | def has_svn(): |
|
170 | def has_svn(): | |
171 | return matchoutput('svn --version 2>&1', r'^svn, version') and \ |
|
171 | return matchoutput('svn --version 2>&1', r'^svn, version') and \ | |
172 | matchoutput('svnadmin --version 2>&1', r'^svnadmin, version') |
|
172 | matchoutput('svnadmin --version 2>&1', r'^svnadmin, version') | |
173 |
|
173 | |||
174 | def has_svn_bindings(): |
|
174 | def has_svn_bindings(): | |
175 | try: |
|
175 | try: | |
176 | import svn.core |
|
176 | import svn.core | |
177 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
177 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR | |
178 | if version < (1, 4): |
|
178 | if version < (1, 4): | |
179 | return False |
|
179 | return False | |
180 | return True |
|
180 | return True | |
181 | except ImportError: |
|
181 | except ImportError: | |
182 | return False |
|
182 | return False | |
183 |
|
183 | |||
184 | def has_p4(): |
|
184 | def has_p4(): | |
185 | return (matchoutput('p4 -V', r'Rev\. P4/') and |
|
185 | return (matchoutput('p4 -V', r'Rev\. P4/') and | |
186 | matchoutput('p4d -V', r'Rev\. P4D/')) |
|
186 | matchoutput('p4d -V', r'Rev\. P4D/')) | |
187 |
|
187 | |||
188 | def has_symlink(): |
|
188 | def has_symlink(): | |
189 | if getattr(os, "symlink", None) is None: |
|
189 | if getattr(os, "symlink", None) is None: | |
190 | return False |
|
190 | return False | |
191 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
191 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
192 | try: |
|
192 | try: | |
193 | os.symlink(".", name) |
|
193 | os.symlink(".", name) | |
194 | os.unlink(name) |
|
194 | os.unlink(name) | |
195 | return True |
|
195 | return True | |
196 | except (OSError, AttributeError): |
|
196 | except (OSError, AttributeError): | |
197 | return False |
|
197 | return False | |
198 |
|
198 | |||
199 | def has_hardlink(): |
|
199 | def has_hardlink(): | |
200 | from mercurial import util |
|
200 | from mercurial import util | |
201 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
201 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) | |
202 | os.close(fh) |
|
202 | os.close(fh) | |
203 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
203 | name = tempfile.mktemp(dir='.', prefix=tempprefix) | |
204 | try: |
|
204 | try: | |
205 | try: |
|
205 | try: | |
206 | util.oslink(fn, name) |
|
206 | util.oslink(fn, name) | |
207 | os.unlink(name) |
|
207 | os.unlink(name) | |
208 | return True |
|
208 | return True | |
209 | except OSError: |
|
209 | except OSError: | |
210 | return False |
|
210 | return False | |
211 | finally: |
|
211 | finally: | |
212 | os.unlink(fn) |
|
212 | os.unlink(fn) | |
213 |
|
213 | |||
214 | def has_tla(): |
|
214 | def has_tla(): | |
215 | return matchoutput('tla --version 2>&1', r'The GNU Arch Revision') |
|
215 | return matchoutput('tla --version 2>&1', r'The GNU Arch Revision') | |
216 |
|
216 | |||
217 | def has_gpg(): |
|
217 | def has_gpg(): | |
218 | return matchoutput('gpg --version 2>&1', r'GnuPG') |
|
218 | return matchoutput('gpg --version 2>&1', r'GnuPG') | |
219 |
|
219 | |||
220 | def has_unix_permissions(): |
|
220 | def has_unix_permissions(): | |
221 | d = tempfile.mkdtemp(dir='.', prefix=tempprefix) |
|
221 | d = tempfile.mkdtemp(dir='.', prefix=tempprefix) | |
222 | try: |
|
222 | try: | |
223 | fname = os.path.join(d, 'foo') |
|
223 | fname = os.path.join(d, 'foo') | |
224 | for umask in (077, 007, 022): |
|
224 | for umask in (077, 007, 022): | |
225 | os.umask(umask) |
|
225 | os.umask(umask) | |
226 | f = open(fname, 'w') |
|
226 | f = open(fname, 'w') | |
227 | f.close() |
|
227 | f.close() | |
228 | mode = os.stat(fname).st_mode |
|
228 | mode = os.stat(fname).st_mode | |
229 | os.unlink(fname) |
|
229 | os.unlink(fname) | |
230 | if mode & 0777 != ~umask & 0666: |
|
230 | if mode & 0777 != ~umask & 0666: | |
231 | return False |
|
231 | return False | |
232 | return True |
|
232 | return True | |
233 | finally: |
|
233 | finally: | |
234 | os.rmdir(d) |
|
234 | os.rmdir(d) | |
235 |
|
235 | |||
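The `mode & 0777 != ~umask & 0666` comparison in has_unix_permissions() is plain octal arithmetic: a file opened for writing is requested with mode 0666, the kernel clears the umask bits, so the permission bits that survive must equal ~umask & 0666 on a filesystem with working Unix permissions. A small worked example (illustrative only):

# Prints 077 -> 600, 007 -> 660 and 022 -> 644: the expected modes for a
# file created with mode 0666 under each umask.
for umask in (077, 007, 022):
    print '%03o -> %03o' % (umask, ~umask & 0666)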
236 | def has_pyflakes(): |
|
236 | def has_pyflakes(): | |
237 | return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"", |
|
237 | return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"", | |
238 | r"<stdin>:1: 're' imported but unused", |
|
238 | r"<stdin>:1: 're' imported but unused", | |
239 | True) |
|
239 | True) | |
240 |
|
240 | |||
241 | def has_pygments(): |
|
241 | def has_pygments(): | |
242 | try: |
|
242 | try: | |
243 | import pygments |
|
243 | import pygments | |
244 | return True |
|
244 | return True | |
245 | except ImportError: |
|
245 | except ImportError: | |
246 | return False |
|
246 | return False | |
247 |
|
247 | |||
248 | def has_outer_repo(): |
|
248 | def has_outer_repo(): | |
249 | # failing for reasons other than 'no repo' implies that there is a repo |
|
249 | # failing for reasons other than 'no repo' implies that there is a repo | |
250 | return not matchoutput('hg root 2>&1', |
|
250 | return not matchoutput('hg root 2>&1', | |
251 | r'abort: no repository found', True) |
|
251 | r'abort: no repository found', True) | |
252 |
|
252 | |||
253 | def has_ssl(): |
|
253 | def has_ssl(): | |
254 | try: |
|
254 | try: | |
255 | import ssl |
|
255 | import ssl | |
256 | import OpenSSL |
|
256 | import OpenSSL | |
257 | OpenSSL.SSL.Context |
|
257 | OpenSSL.SSL.Context | |
258 | return True |
|
258 | return True | |
259 | except ImportError: |
|
259 | except ImportError: | |
260 | return False |
|
260 | return False | |
261 |
|
261 | |||
262 | def has_windows(): |
|
262 | def has_windows(): | |
263 | return os.name == 'nt' |
|
263 | return os.name == 'nt' | |
264 |
|
264 | |||
265 | def has_system_sh(): |
|
265 | def has_system_sh(): | |
266 | return os.name != 'nt' |
|
266 | return os.name != 'nt' | |
267 |
|
267 | |||
268 | def has_serve(): |
|
268 | def has_serve(): | |
269 | return os.name != 'nt' # gross approximation |
|
269 | return os.name != 'nt' # gross approximation | |
270 |
|
270 | |||
271 | def has_tic(): |
|
271 | def has_tic(): | |
272 | return matchoutput('test -x "`which tic`"', '') |
|
272 | return matchoutput('test -x "`which tic`"', '') | |
273 |
|
273 | |||
274 | def has_msys(): |
|
274 | def has_msys(): | |
275 | return os.getenv('MSYSTEM') |
|
275 | return os.getenv('MSYSTEM') | |
276 |
|
276 | |||
277 | def has_aix(): |
|
277 | def has_aix(): | |
278 | return sys.platform.startswith("aix") |
|
278 | return sys.platform.startswith("aix") | |
279 |
|
279 | |||
280 | checks = { |
|
280 | checks = { | |
281 | "true": (lambda: True, "yak shaving"), |
|
281 | "true": (lambda: True, "yak shaving"), | |
282 | "false": (lambda: False, "nail clipper"), |
|
282 | "false": (lambda: False, "nail clipper"), | |
283 | "baz": (has_baz, "GNU Arch baz client"), |
|
283 | "baz": (has_baz, "GNU Arch baz client"), | |
284 | "bzr": (has_bzr, "Canonical's Bazaar client"), |
|
284 | "bzr": (has_bzr, "Canonical's Bazaar client"), | |
285 | "bzr114": (has_bzr114, "Canonical's Bazaar client >= 1.14"), |
|
285 | "bzr114": (has_bzr114, "Canonical's Bazaar client >= 1.14"), | |
286 | "cacheable": (has_cacheable_fs, "cacheable filesystem"), |
|
286 | "cacheable": (has_cacheable_fs, "cacheable filesystem"), | |
287 | "cvs": (has_cvs, "cvs client/server"), |
|
287 | "cvs": (has_cvs, "cvs client/server"), | |
288 | "cvs112": (has_cvs112, "cvs client/server >= 1.12"), |
|
288 | "cvs112": (has_cvs112, "cvs client/server >= 1.12"), | |
289 | "darcs": (has_darcs, "darcs client"), |
|
289 | "darcs": (has_darcs, "darcs client"), | |
290 | "docutils": (has_docutils, "Docutils text processing library"), |
|
290 | "docutils": (has_docutils, "Docutils text processing library"), | |
291 | "eol-in-paths": (has_eol_in_paths, "end-of-lines in paths"), |
|
291 | "eol-in-paths": (has_eol_in_paths, "end-of-lines in paths"), | |
292 | "execbit": (has_executablebit, "executable bit"), |
|
292 | "execbit": (has_executablebit, "executable bit"), | |
293 | "fifo": (has_fifo, "named pipes"), |
|
293 | "fifo": (has_fifo, "named pipes"), | |
294 | "gettext": (has_gettext, "GNU Gettext (msgfmt)"), |
|
294 | "gettext": (has_gettext, "GNU Gettext (msgfmt)"), | |
295 | "git": (has_git, "git command line client"), |
|
295 | "git": (has_git, "git command line client"), | |
296 | "gpg": (has_gpg, "gpg client"), |
|
296 | "gpg": (has_gpg, "gpg client"), | |
297 | "hardlink": (has_hardlink, "hardlinks"), |
|
297 | "hardlink": (has_hardlink, "hardlinks"), | |
298 | "icasefs": (has_icasefs, "case insensitive file system"), |
|
298 | "icasefs": (has_icasefs, "case insensitive file system"), | |
299 | "inotify": (has_inotify, "inotify extension support"), |
|
299 | "inotify": (has_inotify, "inotify extension support"), | |
300 | "killdaemons": (has_killdaemons, 'killdaemons.py support'), |
|
300 | "killdaemons": (has_killdaemons, 'killdaemons.py support'), | |
301 | "lsprof": (has_lsprof, "python lsprof module"), |
|
301 | "lsprof": (has_lsprof, "python lsprof module"), | |
302 | "mtn": (has_mtn, "monotone client (>= 1.0)"), |
|
302 | "mtn": (has_mtn, "monotone client (>= 1.0)"), | |
303 | "outer-repo": (has_outer_repo, "outer repo"), |
|
303 | "outer-repo": (has_outer_repo, "outer repo"), | |
304 | "p4": (has_p4, "Perforce server and client"), |
|
304 | "p4": (has_p4, "Perforce server and client"), | |
305 | "pyflakes": (has_pyflakes, "Pyflakes python linter"), |
|
305 | "pyflakes": (has_pyflakes, "Pyflakes python linter"), | |
306 | "pygments": (has_pygments, "Pygments source highlighting library"), |
|
306 | "pygments": (has_pygments, "Pygments source highlighting library"), | |
307 | "serve": (has_serve, "platform and python can manage 'hg serve -d'"), |
|
307 | "serve": (has_serve, "platform and python can manage 'hg serve -d'"), | |
308 | "ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"), |
|
308 | "ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"), | |
309 | "svn": (has_svn, "subversion client and admin tools"), |
|
309 | "svn": (has_svn, "subversion client and admin tools"), | |
310 | "svn13": (has_svn13, "subversion client and admin tools >= 1.3"), |
|
310 | "svn13": (has_svn13, "subversion client and admin tools >= 1.3"), | |
311 | "svn15": (has_svn15, "subversion client and admin tools >= 1.5"), |
|
311 | "svn15": (has_svn15, "subversion client and admin tools >= 1.5"), | |
312 | "svn-bindings": (has_svn_bindings, "subversion python bindings"), |
|
312 | "svn-bindings": (has_svn_bindings, "subversion python bindings"), | |
313 | "symlink": (has_symlink, "symbolic links"), |
|
313 | "symlink": (has_symlink, "symbolic links"), | |
314 | "system-sh": (has_system_sh, "system() uses sh"), |
|
314 | "system-sh": (has_system_sh, "system() uses sh"), | |
315 | "tic": (has_tic, "terminfo compiler"), |
|
315 | "tic": (has_tic, "terminfo compiler"), | |
316 | "tla": (has_tla, "GNU Arch tla client"), |
|
316 | "tla": (has_tla, "GNU Arch tla client"), | |
317 | "unix-permissions": (has_unix_permissions, "unix-style permissions"), |
|
317 | "unix-permissions": (has_unix_permissions, "unix-style permissions"), | |
318 | "windows": (has_windows, "Windows"), |
|
318 | "windows": (has_windows, "Windows"), | |
319 | "msys": (has_msys, "Windows with MSYS"), |
|
319 | "msys": (has_msys, "Windows with MSYS"), | |
320 | "aix": (has_aix, "AIX"), |
|
320 | "aix": (has_aix, "AIX"), | |
321 | } |
|
321 | } |
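Each entry in this table maps a feature name to a (probe function, description) pair; a test asks for features by name and hghave runs the matching probe. A minimal sketch of that lookup, assuming the checks dict above is in scope (the real argument handling and exit codes live in the hghave script itself):

def missingfeatures(names):
    # Return the requested features whose probe fails, reporting each one.
    missing = []
    for name in names:
        check, desc = checks[name]
        if not check():
            missing.append(name)
            print 'missing feature: %s' % desc
    return missing

The pyflakes test below relies on this mechanism: it runs "$TESTDIR/hghave" pyflakes || exit 80, and exit code 80 is the value the Mercurial test runner treats as a skipped test.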
@@ -1,30 +1,21 b'' | |||||
1 | $ "$TESTDIR/hghave" pyflakes || exit 80 |
|
1 | $ "$TESTDIR/hghave" pyflakes || exit 80 | |
2 | $ cd "`dirname "$TESTDIR"`" |
|
2 | $ cd "`dirname "$TESTDIR"`" | |
3 |
|
3 | |||
4 | run pyflakes on all tracked files ending in .py or without a file ending |
|
4 | run pyflakes on all tracked files ending in .py or without a file ending | |
5 | (skipping binary file random-seed) |
|
5 | (skipping binary file random-seed) | |
6 | $ hg manifest 2>/dev/null | egrep "\.py$|^[^.]*$" | grep -v /random_seed$ \ |
|
6 | $ hg manifest 2>/dev/null | egrep "\.py$|^[^.]*$" | grep -v /random_seed$ \ | |
7 | > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py" |
|
7 | > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py" | |
8 | contrib/simplemerge:*: 'os' imported but unused (glob) |
|
|||
9 | contrib/win32/hgwebdir_wsgi.py:*: 'win32traceutil' imported but unused (glob) |
|
8 | contrib/win32/hgwebdir_wsgi.py:*: 'win32traceutil' imported but unused (glob) | |
10 | setup.py:*: 'sha' imported but unused (glob) |
|
9 | setup.py:*: 'sha' imported but unused (glob) | |
11 | setup.py:*: 'zlib' imported but unused (glob) |
|
10 | setup.py:*: 'zlib' imported but unused (glob) | |
12 | setup.py:*: 'bz2' imported but unused (glob) |
|
11 | setup.py:*: 'bz2' imported but unused (glob) | |
13 | setup.py:*: 'py2exe' imported but unused (glob) |
|
12 | setup.py:*: 'py2exe' imported but unused (glob) | |
14 | tests/hghave.py:*: 'hgext' imported but unused (glob) |
|
13 | tests/hghave.py:*: 'hgext' imported but unused (glob) | |
15 | tests/hghave.py:*: '_lsprof' imported but unused (glob) |
|
14 | tests/hghave.py:*: '_lsprof' imported but unused (glob) | |
16 | tests/hghave.py:*: 'publish_cmdline' imported but unused (glob) |
|
15 | tests/hghave.py:*: 'publish_cmdline' imported but unused (glob) | |
17 | tests/hghave.py:*: 'pygments' imported but unused (glob) |
|
16 | tests/hghave.py:*: 'pygments' imported but unused (glob) | |
18 | tests/hghave.py:*: 'ssl' imported but unused (glob) |
|
17 | tests/hghave.py:*: 'ssl' imported but unused (glob) | |
19 | contrib/casesmash.py:*: local variable 'inst' is assigned to but never used (glob) |
|
|||
20 | contrib/check-code.py:*: local variable 'po' is assigned to but never used (glob) |
|
|||
21 | contrib/hgfixes/fix_leftover_imports.py:*: local variable 'bare_names' is assigned to but never used (glob) |
|
|||
22 | contrib/perf.py:*: local variable 'm' is assigned to but never used (glob) |
|
|||
23 | contrib/perf.py:*: local variable 'c' is assigned to but never used (glob) |
|
|||
24 | doc/hgmanpage.py:*: local variable 'backref_text' is assigned to but never used (glob) |
|
|||
25 | tests/hghave.py:*: local variable 'err' is assigned to but never used (glob) |
|
|||
26 | tests/test-hgweb-auth.py:*: local variable 'e' is assigned to but never used (glob) |
|
|||
27 | contrib/win32/hgwebdir_wsgi.py:*: 'from isapi.install import *' used; unable to detect undefined names (glob) |
|
18 | contrib/win32/hgwebdir_wsgi.py:*: 'from isapi.install import *' used; unable to detect undefined names (glob) | |
28 | hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob) |
|
19 | hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob) | |
29 |
|
20 | |||
30 |
|
21 |
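The expected output above is what survives `$TESTDIR/filterpyflakes.py`, which trims the raw pyflakes report down to the warning classes the project enforces (this revision drops the 'assigned to but never used' entries, for example). The real filter script is not part of this diff; a hedged sketch of that kind of post-processing, with the pattern list assumed for illustration, might look like:

import re, sys

# Keep only the warning classes present in the expected output above:
# unused imports and wildcard-import notices; drop everything else.
keep = re.compile(r"imported but unused|unable to detect undefined names")
for line in sys.stdin:
    if keep.search(line):
        sys.stdout.write(line)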
@@ -1,107 +1,107 b'' | |||||
1 | from mercurial import demandimport; demandimport.enable() |
|
1 | from mercurial import demandimport; demandimport.enable() | |
2 | import urllib2 |
|
2 | import urllib2 | |
3 | from mercurial import ui, util |
|
3 | from mercurial import ui, util | |
4 | from mercurial import url |
|
4 | from mercurial import url | |
5 | from mercurial.error import Abort |
|
5 | from mercurial.error import Abort | |
6 |
|
6 | |||
7 | class myui(ui.ui): |
|
7 | class myui(ui.ui): | |
8 | def interactive(self): |
|
8 | def interactive(self): | |
9 | return False |
|
9 | return False | |
10 |
|
10 | |||
11 | origui = myui() |
|
11 | origui = myui() | |
12 |
|
12 | |||
13 | def writeauth(items): |
|
13 | def writeauth(items): | |
14 | ui = origui.copy() |
|
14 | ui = origui.copy() | |
15 | for name, value in items.iteritems(): |
|
15 | for name, value in items.iteritems(): | |
16 | ui.setconfig('auth', name, value) |
|
16 | ui.setconfig('auth', name, value) | |
17 | return ui |
|
17 | return ui | |
18 |
|
18 | |||
19 | def dumpdict(dict): |
|
19 | def dumpdict(dict): | |
20 | return '{' + ', '.join(['%s: %s' % (k, dict[k]) |
|
20 | return '{' + ', '.join(['%s: %s' % (k, dict[k]) | |
21 | for k in sorted(dict.iterkeys())]) + '}' |
|
21 | for k in sorted(dict.iterkeys())]) + '}' | |
22 |
|
22 | |||
23 | def test(auth, urls=None): |
|
23 | def test(auth, urls=None): | |
24 | print 'CFG:', dumpdict(auth) |
|
24 | print 'CFG:', dumpdict(auth) | |
25 | prefixes = set() |
|
25 | prefixes = set() | |
26 | for k in auth: |
|
26 | for k in auth: | |
27 | prefixes.add(k.split('.', 1)[0]) |
|
27 | prefixes.add(k.split('.', 1)[0]) | |
28 | for p in prefixes: |
|
28 | for p in prefixes: | |
29 | for name in ('.username', '.password'): |
|
29 | for name in ('.username', '.password'): | |
30 | if (p + name) not in auth: |
|
30 | if (p + name) not in auth: | |
31 | auth[p + name] = p |
|
31 | auth[p + name] = p | |
32 | auth = dict((k, v) for k, v in auth.iteritems() if v is not None) |
|
32 | auth = dict((k, v) for k, v in auth.iteritems() if v is not None) | |
33 |
|
33 | |||
34 | ui = writeauth(auth) |
|
34 | ui = writeauth(auth) | |
35 |
|
35 | |||
36 | def _test(uri): |
|
36 | def _test(uri): | |
37 | print 'URI:', uri |
|
37 | print 'URI:', uri | |
38 | try: |
|
38 | try: | |
39 | pm = url.passwordmgr(ui) |
|
39 | pm = url.passwordmgr(ui) | |
40 | u, authinfo = util.url(uri).authinfo() |
|
40 | u, authinfo = util.url(uri).authinfo() | |
41 | if authinfo is not None: |
|
41 | if authinfo is not None: | |
42 | pm.add_password(*authinfo) |
|
42 | pm.add_password(*authinfo) | |
43 | print ' ', pm.find_user_password('test', u) |
|
43 | print ' ', pm.find_user_password('test', u) | |
44 | except Abort |
|
44 | except Abort: | |
45 | print 'abort' |
|
45 | print 'abort' | |
46 |
|
46 | |||
47 | if not urls: |
|
47 | if not urls: | |
48 | urls = [ |
|
48 | urls = [ | |
49 | 'http://example.org/foo', |
|
49 | 'http://example.org/foo', | |
50 | 'http://example.org/foo/bar', |
|
50 | 'http://example.org/foo/bar', | |
51 | 'http://example.org/bar', |
|
51 | 'http://example.org/bar', | |
52 | 'https://example.org/foo', |
|
52 | 'https://example.org/foo', | |
53 | 'https://example.org/foo/bar', |
|
53 | 'https://example.org/foo/bar', | |
54 | 'https://example.org/bar', |
|
54 | 'https://example.org/bar', | |
55 | 'https://x@example.org/bar', |
|
55 | 'https://x@example.org/bar', | |
56 | 'https://y@example.org/bar', |
|
56 | 'https://y@example.org/bar', | |
57 | ] |
|
57 | ] | |
58 | for u in urls: |
|
58 | for u in urls: | |
59 | _test(u) |
|
59 | _test(u) | |
60 |
|
60 | |||
61 |
|
61 | |||
62 | print '\n*** Test in-uri schemes\n' |
|
62 | print '\n*** Test in-uri schemes\n' | |
63 | test({'x.prefix': 'http://example.org'}) |
|
63 | test({'x.prefix': 'http://example.org'}) | |
64 | test({'x.prefix': 'https://example.org'}) |
|
64 | test({'x.prefix': 'https://example.org'}) | |
65 | test({'x.prefix': 'http://example.org', 'x.schemes': 'https'}) |
|
65 | test({'x.prefix': 'http://example.org', 'x.schemes': 'https'}) | |
66 | test({'x.prefix': 'https://example.org', 'x.schemes': 'http'}) |
|
66 | test({'x.prefix': 'https://example.org', 'x.schemes': 'http'}) | |
67 |
|
67 | |||
68 | print '\n*** Test separately configured schemes\n' |
|
68 | print '\n*** Test separately configured schemes\n' | |
69 | test({'x.prefix': 'example.org', 'x.schemes': 'http'}) |
|
69 | test({'x.prefix': 'example.org', 'x.schemes': 'http'}) | |
70 | test({'x.prefix': 'example.org', 'x.schemes': 'https'}) |
|
70 | test({'x.prefix': 'example.org', 'x.schemes': 'https'}) | |
71 | test({'x.prefix': 'example.org', 'x.schemes': 'http https'}) |
|
71 | test({'x.prefix': 'example.org', 'x.schemes': 'http https'}) | |
72 |
|
72 | |||
73 | print '\n*** Test prefix matching\n' |
|
73 | print '\n*** Test prefix matching\n' | |
74 | test({'x.prefix': 'http://example.org/foo', |
|
74 | test({'x.prefix': 'http://example.org/foo', | |
75 | 'y.prefix': 'http://example.org/bar'}) |
|
75 | 'y.prefix': 'http://example.org/bar'}) | |
76 | test({'x.prefix': 'http://example.org/foo', |
|
76 | test({'x.prefix': 'http://example.org/foo', | |
77 | 'y.prefix': 'http://example.org/foo/bar'}) |
|
77 | 'y.prefix': 'http://example.org/foo/bar'}) | |
78 | test({'x.prefix': '*', 'y.prefix': 'https://example.org/bar'}) |
|
78 | test({'x.prefix': '*', 'y.prefix': 'https://example.org/bar'}) | |
79 |
|
79 | |||
80 | print '\n*** Test user matching\n' |
|
80 | print '\n*** Test user matching\n' | |
81 | test({'x.prefix': 'http://example.org/foo', |
|
81 | test({'x.prefix': 'http://example.org/foo', | |
82 | 'x.username': None, |
|
82 | 'x.username': None, | |
83 | 'x.password': 'xpassword'}, |
|
83 | 'x.password': 'xpassword'}, | |
84 | urls=['http://y@example.org/foo']) |
|
84 | urls=['http://y@example.org/foo']) | |
85 | test({'x.prefix': 'http://example.org/foo', |
|
85 | test({'x.prefix': 'http://example.org/foo', | |
86 | 'x.username': None, |
|
86 | 'x.username': None, | |
87 | 'x.password': 'xpassword', |
|
87 | 'x.password': 'xpassword', | |
88 | 'y.prefix': 'http://example.org/foo', |
|
88 | 'y.prefix': 'http://example.org/foo', | |
89 | 'y.username': 'y', |
|
89 | 'y.username': 'y', | |
90 | 'y.password': 'ypassword'}, |
|
90 | 'y.password': 'ypassword'}, | |
91 | urls=['http://y@example.org/foo']) |
|
91 | urls=['http://y@example.org/foo']) | |
92 | test({'x.prefix': 'http://example.org/foo/bar', |
|
92 | test({'x.prefix': 'http://example.org/foo/bar', | |
93 | 'x.username': None, |
|
93 | 'x.username': None, | |
94 | 'x.password': 'xpassword', |
|
94 | 'x.password': 'xpassword', | |
95 | 'y.prefix': 'http://example.org/foo', |
|
95 | 'y.prefix': 'http://example.org/foo', | |
96 | 'y.username': 'y', |
|
96 | 'y.username': 'y', | |
97 | 'y.password': 'ypassword'}, |
|
97 | 'y.password': 'ypassword'}, | |
98 | urls=['http://y@example.org/foo/bar']) |
|
98 | urls=['http://y@example.org/foo/bar']) | |
99 |
|
99 | |||
100 | def testauthinfo(fullurl, authurl): |
|
100 | def testauthinfo(fullurl, authurl): | |
101 | print 'URIs:', fullurl, authurl |
|
101 | print 'URIs:', fullurl, authurl | |
102 | pm = urllib2.HTTPPasswordMgrWithDefaultRealm() |
|
102 | pm = urllib2.HTTPPasswordMgrWithDefaultRealm() | |
103 | pm.add_password(*util.url(fullurl).authinfo()[1]) |
|
103 | pm.add_password(*util.url(fullurl).authinfo()[1]) | |
104 | print pm.find_user_password('test', authurl) |
|
104 | print pm.find_user_password('test', authurl) | |
105 |
|
105 | |||
106 | print '\n*** Test urllib2 and util.url\n' |
|
106 | print '\n*** Test urllib2 and util.url\n' | |
107 | testauthinfo('http://user@example.com:8080/foo', 'http://example.com:8080/foo') |
|
107 | testauthinfo('http://user@example.com:8080/foo', 'http://example.com:8080/foo') |
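The dictionaries handed to test() mirror Mercurial's [auth] configuration section: writeauth() sets each key under [auth] on a copied ui, and test() fills in a username and password equal to the prefix name whenever they are omitted. The first prefix-matching case above therefore corresponds roughly to an hgrc such as the following (values are the defaults the test supplies; the snippet is illustrative, not taken from a real configuration):

[auth]
x.prefix = http://example.org/foo
x.username = x
x.password = x
y.prefix = http://example.org/bar
y.username = y
y.password = y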