Show More
@@ -1,34 +1,34 b'' | |||
|
1 | 1 | import os, __builtin__ |
|
2 | 2 | from mercurial import util |
|
3 | 3 | |
|
def lowerwrap(scope, funcname):
    """Replace scope.funcname with a case-insensitive filename wrapper.

    The wrapper first tries the exact name; when the final path
    component is not present in its directory listing, it retries with
    the first entry that matches case-insensitively, then delegates to
    the original callable.
    """
    original = getattr(scope, funcname)

    def wrap(fname, *args, **kwargs):
        directory, base = os.path.split(fname)
        try:
            entries = os.listdir(directory or '.')
        except OSError:
            # Directory unreadable/missing: no fallback candidates.
            entries = []
        # An exact match needs no substitution.
        if base not in entries:
            wanted = base.lower()
            for entry in entries:
                if entry.lower() == wanted:
                    return original(os.path.join(directory, entry),
                                    *args, **kwargs)
        return original(fname, *args, **kwargs)

    scope.__dict__[funcname] = wrap
|
19 | 19 | |
|
def normcase(path):
    """Normalize a path for comparison by folding it to lower case."""
    return str.lower(path)
|
22 | 22 | |
|
# Make path normalization case-insensitive everywhere.
os.path.normcase = normcase

# Wrap the builtin file constructors so opening a file falls back to a
# case-insensitive match of the final path component.
for f in 'file open'.split():
    lowerwrap(__builtin__, f)

# Same treatment for the os-level file accessors and mutators.
for f in "chmod chown open lstat stat remove unlink".split():
    lowerwrap(os, f)

# ...and for the path existence checks.
for f in "exists lexists".split():
    lowerwrap(os.path, f)

# Mercurial's own file wrapper must be covered as well.
lowerwrap(util, 'posixfile')
@@ -1,512 +1,511 b'' | |||
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | # |
|
3 | 3 | # check-code - a style and portability checker for Mercurial |
|
4 | 4 | # |
|
5 | 5 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
6 | 6 | # |
|
7 | 7 | # This software may be used and distributed according to the terms of the |
|
8 | 8 | # GNU General Public License version 2 or any later version. |
|
9 | 9 | |
|
10 | 10 | import re, glob, os, sys |
|
11 | 11 | import keyword |
|
12 | 12 | import optparse |
|
try:
    import re2
except ImportError:
    # Optional faster regex engine; fall back to stdlib re below.
    re2 = None

def compilere(pat, multiline=False):
    """Compile pat, preferring the faster re2 engine when available.

    Patterns re2 cannot handle fall back to the stdlib engine.
    """
    if multiline:
        pat = '(?m)' + pat
    if re2 is not None:
        try:
            return re2.compile(pat)
        except re2.error:
            pass
    return re.compile(pat)
|
27 | 27 | |
|
def repquote(m):
    """Blank out a quoted string: word chars become 'x', the rest 'o'.

    Whitespace and newlines are preserved so line and column positions
    used by later checks stay meaningful.
    """
    body = m.group('text')
    body = re.sub(r"\w", "x", body)
    body = re.sub(r"[^\s\nx]", "o", body)
    quote = m.group('quote')
    return quote + body + quote
|
32 | 32 | |
|
def reppython(m):
    """Mask Python source: comments become '#' runs, strings are blanked."""
    comment = m.group('comment')
    if not comment:
        # Not a comment match; must be a quoted string.
        return repquote(m)
    # Keep trailing whitespace so trailing-space checks still fire.
    stripped = len(comment.rstrip())
    return "#" * stripped + comment[stripped:]
|
39 | 39 | |
|
def repcomment(m):
    """Replace a comment's text with '#' characters, keeping the indent."""
    indent, text = m.group(1), m.group(2)
    return indent + "#" * len(text)
|
42 | 42 | |
|
def repccomment(m):
    """Blank a C comment body, masking content but preserving layout.

    Non-space characters, and single spaces directly after a newline,
    become 'x'.
    """
    masked = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return "%s%s*/" % (m.group(1), masked)
|
46 | 46 | |
|
def repcallspaces(m):
    """Collapse continuation-line indentation inside a call's arguments."""
    args = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + args
|
50 | 50 | |
|
def repinclude(m):
    """Replace an #include target with a fixed placeholder."""
    return "%s<foo>" % m.group(1)
|
53 | 53 | |
|
def rephere(m):
    """Mask the body of a shell here-document with 'x' characters."""
    body = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + body
|
57 | 57 | |
|
58 | 58 | |
|
59 | 59 | testpats = [ |
|
60 | 60 | [ |
|
61 | 61 | (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"), |
|
62 | 62 | (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"), |
|
63 | 63 | (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"), |
|
64 | 64 | (r'sed.*-i', "don't use 'sed -i', use a temporary file"), |
|
65 | 65 | (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"), |
|
66 | 66 | (r'echo -n', "don't use 'echo -n', use printf"), |
|
67 | 67 | (r'(^| )wc[^|]*$\n(?!.*\(re\))', "filter wc output"), |
|
68 | 68 | (r'head -c', "don't use 'head -c', use 'dd'"), |
|
69 | 69 | (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"), |
|
70 | 70 | (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"), |
|
71 | 71 | (r'printf.*\\([1-9]|0\d)', "don't use 'printf \NNN', use Python"), |
|
72 | 72 | (r'printf.*\\x', "don't use printf \\x, use Python"), |
|
73 | 73 | (r'\$\(.*\)', "don't use $(expr), use `expr`"), |
|
74 | 74 | (r'rm -rf \*', "don't use naked rm -rf, target a directory"), |
|
75 | 75 | (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w', |
|
76 | 76 | "use egrep for extended grep syntax"), |
|
77 | 77 | (r'/bin/', "don't use explicit paths for tools"), |
|
78 | 78 | (r'[^\n]\Z', "no trailing newline"), |
|
79 | 79 | (r'export.*=', "don't export and assign at once"), |
|
80 | 80 | (r'^source\b', "don't use 'source', use '.'"), |
|
81 | 81 | (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"), |
|
82 | 82 | (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"), |
|
83 | 83 | (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"), |
|
84 | 84 | (r'^stop\(\)', "don't use 'stop' as a shell function name"), |
|
85 | 85 | (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"), |
|
86 | 86 | (r'^alias\b.*=', "don't use alias, use a function"), |
|
87 | 87 | (r'if\s*!', "don't use '!' to negate exit status"), |
|
88 | 88 | (r'/dev/u?random', "don't use entropy, use /dev/zero"), |
|
89 | 89 | (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"), |
|
90 | 90 | (r'^( *)\t', "don't use tabs to indent"), |
|
91 | 91 | (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)', |
|
92 | 92 | "put a backslash-escaped newline after sed 'i' command"), |
|
93 | 93 | ], |
|
94 | 94 | # warnings |
|
95 | 95 | [ |
|
96 | 96 | (r'^function', "don't use 'function', use old style"), |
|
97 | 97 | (r'^diff.*-\w*N', "don't use 'diff -N'"), |
|
98 | 98 | (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"), |
|
99 | 99 | (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"), |
|
100 | 100 | (r'kill (`|\$\()', "don't use kill, use killdaemons.py") |
|
101 | 101 | ] |
|
102 | 102 | ] |
|
103 | 103 | |
|
# Filters applied to shell test scripts before checking: blank out
# comment text and here-document bodies so their contents don't
# trigger the source checks.
testfilters = [
    (r"( *)(#([^\n]*\S)?)", repcomment),
    (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]
|
108 | 108 | |
|
# Shared message: a (glob) marker lets expected test output match both
# '/' and '\' path separators.
winglobmsg = "use (glob) to match Windows paths too"
# Prefix matching a user command line ("  $ ") in a unified test.
uprefix = r"^  \$ "
|
111 | 111 | utestpats = [ |
|
112 | 112 | [ |
|
113 | 113 | (r'^(\S.*|| [$>] .*)[ \t]\n', "trailing whitespace on non-output"), |
|
114 | 114 | (uprefix + r'.*\|\s*sed[^|>\n]*\n', |
|
115 | 115 | "use regex test output patterns instead of sed"), |
|
116 | 116 | (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"), |
|
117 | 117 | (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"), |
|
118 | 118 | (uprefix + r'.*\|\| echo.*(fail|error)', |
|
119 | 119 | "explicit exit code checks unnecessary"), |
|
120 | 120 | (uprefix + r'set -e', "don't use set -e"), |
|
121 | 121 | (uprefix + r'\s', "don't indent commands, use > for continued lines"), |
|
122 | 122 | (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), |
|
123 | 123 | (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', |
|
124 | 124 | winglobmsg), |
|
125 | 125 | (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), |
|
126 | 126 | (r'^ reverting .*/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), |
|
127 | 127 | (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), |
|
128 | 128 | (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), |
|
129 | 129 | (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg, |
|
130 | 130 | '\$TESTTMP/unix-repo$'), |
|
131 | 131 | (r'^ moving \S+/.*[^)]$', winglobmsg), |
|
132 | 132 | (r'^ no changes made to subrepo since.*/.*[^)]$', |
|
133 | 133 | winglobmsg, '\$TESTTMP/unix-repo$'), |
|
134 | 134 | (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', |
|
135 | 135 | winglobmsg, '\$TESTTMP/unix-repo$'), |
|
136 | 136 | ], |
|
137 | 137 | # warnings |
|
138 | 138 | [ |
|
139 | 139 | (r'^ [^*?/\n]* \(glob\)$', |
|
140 | 140 | "warning: glob match with no glob character (?*/)"), |
|
141 | 141 | ] |
|
142 | 142 | ] |
|
143 | 143 | |
|
# Derive unified-test versions of the shell-test patterns: each shell
# pattern is re-anchored to lines carrying the "  $ "/"  > " command
# prefix used in .t files, for both the failure [0] and warning [1]
# pattern lists.
for i in [0, 1]:
    for p, m in testpats[i]:
        if p.startswith(r'^'):
            p = r"^  [$>] (%s)" % p[1:]
        else:
            p = r"^  [$>] .*(%s)" % p
        utestpats[i].append((p, m))
|
151 | 151 | |
|
# Filters for unified tests: mask here-document bodies (note the
# "  > " continuation prefix inside .t files) and comments before
# the checks run.
utestfilters = [
    (r"<<(\S+)((.|\n)*?\n  > \1)", rephere),
    (r"( *)(#([^\n]*\S)?)", repcomment),
]
|
156 | 156 | |
|
157 | 157 | pypats = [ |
|
158 | 158 | [ |
|
159 | 159 | (r'^\s*def\s*\w+\s*\(.*,\s*\(', |
|
160 | 160 | "tuple parameter unpacking not available in Python 3+"), |
|
161 | 161 | (r'lambda\s*\(.*,.*\)', |
|
162 | 162 | "tuple parameter unpacking not available in Python 3+"), |
|
163 | 163 | (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), |
|
164 | 164 | (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), |
|
165 | 165 | (r'\.has_key\b', "dict.has_key is not available in Python 3+"), |
|
166 | 166 | (r'\s<>\s', '<> operator is not available in Python 3+, use !='), |
|
167 | 167 | (r'^\s*\t', "don't use tabs"), |
|
168 | 168 | (r'\S;\s*\n', "semicolon"), |
|
169 | 169 | (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"), |
|
170 | 170 | (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"), |
|
171 | 171 | (r'(\w|\)),\w', "missing whitespace after ,"), |
|
172 | 172 | (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"), |
|
173 | 173 | (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"), |
|
174 | 174 | (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n' |
|
175 | 175 | r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'), |
|
176 | 176 | (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?' |
|
177 | 177 | r'((?:\n|\1\s.*\n)+?)\1finally:', |
|
178 | 178 | 'no yield inside try/finally in Python 2.4'), |
|
179 | 179 | (r'.{81}', "line too long"), |
|
180 | 180 | (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'), |
|
181 | 181 | (r'[^\n]\Z', "no trailing newline"), |
|
182 | 182 | (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"), |
|
183 | 183 | # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=', |
|
184 | 184 | # "don't use underbars in identifiers"), |
|
185 | 185 | (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ', |
|
186 | 186 | "don't use camelcase in identifiers"), |
|
187 | 187 | (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+', |
|
188 | 188 | "linebreak after :"), |
|
189 | 189 | (r'class\s[^( \n]+:', "old-style class, use class foo(object)"), |
|
190 | 190 | (r'class\s[^( \n]+\(\):', |
|
191 | 191 | "class foo() not available in Python 2.4, use class foo(object)"), |
|
192 | 192 | (r'\b(%s)\(' % '|'.join(keyword.kwlist), |
|
193 | 193 | "Python keyword is not a function"), |
|
194 | 194 | (r',]', "unneeded trailing ',' in list"), |
|
195 | 195 | # (r'class\s[A-Z][^\(]*\((?!Exception)', |
|
196 | 196 | # "don't capitalize non-exception classes"), |
|
197 | 197 | # (r'in range\(', "use xrange"), |
|
198 | 198 | # (r'^\s*print\s+', "avoid using print in core and extensions"), |
|
199 | 199 | (r'[\x80-\xff]', "non-ASCII character literal"), |
|
200 | 200 | (r'("\')\.format\(', "str.format() not available in Python 2.4"), |
|
201 | 201 | (r'^\s*with\s+', "with not available in Python 2.4"), |
|
202 | 202 | (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"), |
|
203 | 203 | (r'^\s*except.* as .*:', "except as not available in Python 2.4"), |
|
204 | 204 | (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"), |
|
205 | 205 | (r'(?<!def)\s+(any|all|format)\(', |
|
206 | 206 | "any/all/format not available in Python 2.4"), |
|
207 | 207 | (r'(?<!def)\s+(callable)\(', |
|
208 | 208 | "callable not available in Python 3, use getattr(f, '__call__', None)"), |
|
209 | 209 | (r'if\s.*\selse', "if ... else form not available in Python 2.4"), |
|
210 | 210 | (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist), |
|
211 | 211 | "gratuitous whitespace after Python keyword"), |
|
212 | 212 | (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"), |
|
213 | 213 | # (r'\s\s=', "gratuitous whitespace before ="), |
|
214 | 214 | (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S', |
|
215 | 215 | "missing whitespace around operator"), |
|
216 | 216 | (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s', |
|
217 | 217 | "missing whitespace around operator"), |
|
218 | 218 | (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S', |
|
219 | 219 | "missing whitespace around operator"), |
|
220 | 220 | (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', |
|
221 | 221 | "wrong whitespace around ="), |
|
222 | 222 | (r'raise Exception', "don't raise generic exceptions"), |
|
223 | 223 | (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$', |
|
224 | 224 | "don't use old-style two-argument raise, use Exception(message)"), |
|
225 | 225 | (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"), |
|
226 | 226 | (r' [=!]=\s+(True|False|None)', |
|
227 | 227 | "comparison with singleton, use 'is' or 'is not' instead"), |
|
228 | 228 | (r'^\s*(while|if) [01]:', |
|
229 | 229 | "use True/False for constant Boolean expression"), |
|
230 | 230 | (r'(?:(?<!def)\s+|\()hasattr', |
|
231 | 231 | 'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'), |
|
232 | 232 | (r'opener\([^)]*\).read\(', |
|
233 | 233 | "use opener.read() instead"), |
|
234 | 234 | (r'BaseException', 'not in Python 2.4, use Exception'), |
|
235 | 235 | (r'os\.path\.relpath', 'os.path.relpath is not in Python 2.5'), |
|
236 | 236 | (r'opener\([^)]*\).write\(', |
|
237 | 237 | "use opener.write() instead"), |
|
238 | 238 | (r'[\s\(](open|file)\([^)]*\)\.read\(', |
|
239 | 239 | "use util.readfile() instead"), |
|
240 | 240 | (r'[\s\(](open|file)\([^)]*\)\.write\(', |
|
241 | 241 | "use util.readfile() instead"), |
|
242 | 242 | (r'^[\s\(]*(open(er)?|file)\([^)]*\)', |
|
243 | 243 | "always assign an opened file to a variable, and close it afterwards"), |
|
244 | 244 | (r'[\s\(](open|file)\([^)]*\)\.', |
|
245 | 245 | "always assign an opened file to a variable, and close it afterwards"), |
|
246 | 246 | (r'(?i)descendent', "the proper spelling is descendAnt"), |
|
247 | 247 | (r'\.debug\(\_', "don't mark debug messages for translation"), |
|
248 | 248 | (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"), |
|
249 | 249 | (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'), |
|
250 | 250 | (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"), |
|
251 | 251 | (r'ui\.(status|progress|write|note|warn)\([\'\"]x', |
|
252 | 252 | "missing _() in ui message (use () to hide false-positives)"), |
|
253 | 253 | (r'release\(.*wlock, .*lock\)', "wrong lock release order"), |
|
254 | 254 | ], |
|
255 | 255 | # warnings |
|
256 | 256 | [ |
|
257 | 257 | ] |
|
258 | 258 | ] |
|
259 | 259 | |
|
# Filter masking Python comments and string literals (via reppython)
# so their contents don't trigger the source-level checks.
pyfilters = [
    (r"""(?msx)(?P<comment>\#.*?$)|
     ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
      (?P<text>(([^\\]|\\.)*?))
      (?P=quote))""", reppython),
]
|
266 | 266 | |
|
# Plain-text (.txt) documentation files need no pre-filtering.
txtfilters = []

# Checks for .txt files: failures first, then (empty) warnings.
txtpats = [
  [
    ('\s$', 'trailing whitespace'),
  ],
  []
]
|
275 | 275 | |
|
276 | 276 | cpats = [ |
|
277 | 277 | [ |
|
278 | 278 | (r'//', "don't use //-style comments"), |
|
279 | 279 | (r'^ ', "don't use spaces to indent"), |
|
280 | 280 | (r'\S\t', "don't use tabs except for indent"), |
|
281 | 281 | (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"), |
|
282 | 282 | (r'.{81}', "line too long"), |
|
283 | 283 | (r'(while|if|do|for)\(', "use space after while/if/do/for"), |
|
284 | 284 | (r'return\(', "return is not a function"), |
|
285 | 285 | (r' ;', "no space before ;"), |
|
286 | 286 | (r'\w+\* \w+', "use int *foo, not int* foo"), |
|
287 | 287 | (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"), |
|
288 | 288 | (r'\w+ (\+\+|--)', "use foo++, not foo ++"), |
|
289 | 289 | (r'\w,\w', "missing whitespace after ,"), |
|
290 | 290 | (r'^[^#]\w[+/*]\w', "missing whitespace in expression"), |
|
291 | 291 | (r'^#\s+\w', "use #foo, not # foo"), |
|
292 | 292 | (r'[^\n]\Z', "no trailing newline"), |
|
293 | 293 | (r'^\s*#import\b', "use only #include in standard C code"), |
|
294 | 294 | ], |
|
295 | 295 | # warnings |
|
296 | 296 | [] |
|
297 | 297 | ] |
|
298 | 298 | |
|
# Filters masking C comments, string literals, #include targets and
# call-argument continuation lines before the C checks run.
cfilters = [
    (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
    (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
    (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
    (r'(\()([^)]+\))', repcallspaces),
]
|
305 | 305 | |
|
# Layering check: mercurial/util.py must not reach up into the ui.
inutilpats = [
    [
        (r'\bui\.', "don't use ui in util"),
    ],
    # warnings
    []
]
|
313 | 313 | |
|
# Layering check: mercurial/revlog.py must not depend on the repo object.
inrevlogpats = [
    [
        (r'\brepo\.', "don't use repo in revlog"),
    ],
    # warnings
    []
]
|
321 | 321 | |
|
# Master table driving checkfile(): each entry is
# (name, filename regex, filters, [failure patterns, warning patterns]).
# A file may match several entries and is checked against each in turn.
checks = [
    ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
    ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
    ('c', r'.*\.c$', cfilters, cpats),
    ('unified test', r'.*\.t$', utestfilters, utestpats),
    ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
     inrevlogpats),
    ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
     inutilpats),
    ('txt', r'.*\.txt$', txtfilters, txtpats),
]
|
333 | 333 | |
|
def _preparepats():
    """Compile every pattern and filter in ``checks`` in place.

    Pattern strings are first rewritten for multi-line matching, then
    each (regex-string, ...) tuple is replaced by its compiled form.
    """
    for c in checks:
        # The last element of each check is the [failures, warnings] pair.
        failandwarn = c[-1]
        for pats in failandwarn:
            for i, pseq in enumerate(pats):
                # fix-up regexes for multi-line searches
                p = pseq[0]
                # \s doesn't match \n
                p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
                # [^...] doesn't match newline
                p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)

                # Preserve the trailing (msg[, ignore]) tuple members.
                pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
        # The third element holds the (pattern, replacement) filters.
        filters = c[2]
        for i, flt in enumerate(filters):
            filters[i] = re.compile(flt[0]), flt[1]
_preparepats()
|
352 | 351 | |
|
class norepeatlogger(object):
    """Error logger that prints each offending file location only once."""
    def __init__(self):
        # (fname, lineno, line) of the most recently reported location.
        self._lastseen = None

    def log(self, fname, lineno, line, msg, blame):
        """print an error related to a given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        :blame: optional "user@rev" annotation for the line, or falsy
        """
        msgid = fname, lineno, line
        if msgid != self._lastseen:
            # New location: print the header and the faulty line itself.
            if blame:
                print "%s:%d (%s):" % (fname, lineno, blame)
            else:
                print "%s:%d:" % (fname, lineno)
            print " > %s" % line
            self._lastseen = msgid
        # The message is printed even when the location is repeated.
        print " " + msg

# Shared logger instance used as checkfile()'s default reporter.
_defaultlogger = norepeatlogger()
|
379 | 378 | |
|
def getblame(f):
    """Return [(line, user, rev), ...] blame info for file f.

    Shells out to ``hg annotate -un`` and parses its per-line output.
    """
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        prefix, content = raw.split(':', 1)
        user, rev = prefix.split()
        # Drop the single space after ':' and the trailing newline.
        annotated.append((content[1:-1], user, rev))
    return annotated
|
387 | 386 | |
|
def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False, lineno=True):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
              logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of error to display before aborting.
             Set to false (default) to report all errors
    :warnings: also check the warning-level patterns
    :blame: annotate each error with hg blame info (user@rev)
    :debug: print progress/skip information to stdout
    :lineno: report real line numbers (False reports them all as 0)

    return True if no error is found, False otherwise.
    """
    blamecache = None
    result = True
    # A file may match several check categories; run each that applies.
    for name, match, filters, pats in checks:
        if debug:
            print name, f
        fc = 0
        if not re.match(match, f):
            if debug:
                print "Skipping %s for %s it doesn't match %s" % (
                       name, match, f)
            continue
        fp = open(f)
        # 'pre' keeps the raw text for reporting; 'post' gets filtered.
        pre = post = fp.read()
        fp.close()
        if "no-" + "check-code" in pre:
            # Marker opts the whole file out of checking.
            if debug:
                print "Skipping %s for %s it has no- and check-code" % (
                      name, f)
            break
        # Mask comments/strings/etc. so they can't trigger patterns.
        for p, r in filters:
            post = re.sub(p, r, post)
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version

        if debug:
            print "Checking %s for %s" % (name, f)

        prelines = None
        errors = []
        for pat in pats:
            # Patterns optionally carry a third 'ignore' regex member.
            if len(pat) == 3:
                p, msg, ignore = pat
            else:
                p, msg = pat
                ignore = None

            # pos/n walk the filtered text to map each match offset
            # back to its line number (filters preserve line structure).
            pos = 0
            n = 0
            for m in p.finditer(post):
                if prelines is None:
                    # Split lazily, only when the first match appears.
                    prelines = pre.splitlines()
                    postlines = post.splitlines(True)

                start = m.start()
                while n < len(postlines):
                    step = len(postlines[n])
                    if pos + step > start:
                        break
                    pos += step
                    n += 1
                # Report the original (unfiltered) line content.
                l = prelines[n]

                if "check-code" + "-ignore" in l:
                    # Per-line opt-out marker.
                    if debug:
                        print "Skipping %s for %s:%s (check-code -ignore)" % (
                              name, f, n)
                    continue
                elif ignore and re.search(ignore, l, re.MULTILINE):
                    continue
                bd = ""
                if blame:
                    bd = 'working directory'
                    if not blamecache:
                        # Run 'hg annotate' at most once per file.
                        blamecache = getblame(f)
                    if n < len(blamecache):
                        bl, bu, br = blamecache[n]
                        if bl == l:
                            bd = '%s@%s' % (bu, br)
                errors.append((f, lineno and n + 1, l, msg, bd))
                result = False

        # Report in file order regardless of which pattern matched first.
        errors.sort()
        for e in errors:
            logfunc(*e)
            fc += 1
            if maxerr and fc >= maxerr:
                print " (too many errors, giving up)"
                break

    return result
|
483 | 482 | |
|
if __name__ == "__main__":
    # Command-line driver: check the named files (or everything in the
    # current directory) and exit non-zero if any check failed.
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")
    parser.add_option("", "--nolineno", action="store_false",
                      dest='lineno', help="don't show line numbers")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
                        lineno=True)
    (options, args) = parser.parse_args()

    if len(args) == 0:
        # No arguments: check every file in the working directory.
        check = glob.glob("*")
    else:
        check = args

    ret = 0
    for f in check:
        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                         blame=options.blame, debug=options.debug,
                         lineno=options.lineno):
            ret = 1
    sys.exit(ret)
@@ -1,108 +1,107 b'' | |||
|
1 | 1 | "Fixer that translates some APIs ignored by the default 2to3 fixers." |
|
2 | 2 | |
|
3 | 3 | # FIXME: This fixer has some ugly hacks. Its main design is based on that of |
|
4 | 4 | # fix_imports, from lib2to3. Unfortunately, the fix_imports framework only |
|
5 | 5 | # changes module names "without dots", meaning it won't work for some changes |
|
6 | 6 | # in the email module/package. Thus this fixer was born. I believe that with a |
|
7 | 7 | # bit more thinking, a more generic fixer can be implemented, but I'll leave |
|
8 | 8 | # that as future work. |
|
9 | 9 | |
|
10 | 10 | from lib2to3.fixer_util import Name |
|
11 | 11 | from lib2to3.fixes import fix_imports |
|
12 | 12 | |
|
13 | 13 | # This maps the old names to the new names. Note that a drawback of the current |
|
14 | 14 | # design is that the dictionary keys MUST have EXACTLY one dot (.) in them, |
|
15 | 15 | # otherwise things will break. (If you don't need a module hierarchy, you're |
|
16 | 16 | # better of just inherit from fix_imports and overriding the MAPPING dict.) |
|
17 | 17 | |
|
# Old capitalized email module names mapped to their Python 3 homes.
# NOTE: keys MUST contain exactly one dot (see module comment above).
MAPPING = {'email.Utils': 'email.utils',
           'email.Errors': 'email.errors',
           'email.Header': 'email.header',
           'email.Parser': 'email.parser',
           'email.Encoders': 'email.encoders',
           'email.MIMEText': 'email.mime.text',
           'email.MIMEBase': 'email.mime.base',
           'email.Generator': 'email.generator',
           'email.MIMEMultipart': 'email.mime.multipart',
          }
|
28 | 28 | |
|
def alternates(members):
    """Build a pattern-grammar alternation group of quoted member names."""
    quoted = [repr(name) for name in members]
    return "(%s)" % "|".join(quoted)
|
31 | 31 | |
|
def build_pattern(mapping=MAPPING):
    """Yield lib2to3 pattern strings matching imports/uses of mapping keys.

    Three patterns are produced: a plain ``import a.b``, a multi-import
    ``import x, a.b, y`` line, and an attribute use ``a.b.something``.
    """
    packages = {}
    for key in mapping:
        # What we are doing here is the following: with dotted names, we'll
        # have something like package_name <trailer '.' module>. Then, we are
        # making a dictionary to copy this structure. For example, if
        # mapping={'A.B': 'a.b', 'A.C': 'a.c'}, it will generate the dictionary
        # {'A': ['b', 'c']} to, then, generate something like "A <trailer '.'
        # ('b' | 'c')".
        name = key.split('.')
        prefix = name[0]
        if prefix in packages:
            packages[prefix].append(name[1:][0])
        else:
            packages[prefix] = name[1:]

    mod_list = ' | '.join(["'%s' '.' ('%s')" %
                           (key, "' | '".join(packages[key])) for key in packages])
    mod_list = '(' + mod_list + ' )'

    yield """name_import=import_name< 'import' module_name=dotted_name< %s > >
          """ % mod_list

    yield """name_import=import_name< 'import'
        multiple_imports=dotted_as_names< any*
        module_name=dotted_name< %s >
        any* >
        >""" % mod_list

    packs = ' | '.join(["'%s' trailer<'.' ('%s')>" % (key,
                        "' | '".join(packages[key])) for key in packages])

    yield "power< package=(%s) trailer<'.' any > any* >" % packs
66 | 65 | |
|
class FixLeftoverImports(fix_imports.FixImports):
    """2to3 fixer renaming dotted email.* imports missed by fix_imports."""
    # We want to run this fixer after fix_import has run (this shouldn't matter
    # for hg, though, as setup3k prefers to run the default fixers first)
    mapping = MAPPING

    def build_pattern(self):
        # Join the three module-level patterns into one alternation.
        return "|".join(build_pattern(self.mapping))

    def transform(self, node, results):
        # Mostly copied from fix_imports.py
        import_mod = results.get("module_name")
        if import_mod:
            try:
                mod_name = import_mod.value
            except AttributeError:
                # XXX: A hack to remove whitespace prefixes and suffixes
                mod_name = str(import_mod).strip()
            new_name = self.mapping[mod_name]
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # marked its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys of
                # MAPPING randomly sprinkled in an import statement.
                # Re-match and recurse until no mapped name remains.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            # Now this is, mostly, a hack
            bare_name = results["package"][0]
            bare_name_text = ''.join(map(str, results['package'])).strip()
            new_name = self.replace.get(bare_name_text)
            prefix = results['package'][0].prefix
            if new_name:
                # Collapse "pkg.mod" into the single renamed Name node.
                bare_name.replace(Name(new_name, prefix=prefix))
                results["package"][1].replace(Name(''))
107 | 106 | results["package"][1].replace(Name('')) |
|
108 | 107 |
@@ -1,410 +1,410 b'' | |||
|
1 | 1 | # perf.py - performance test routines |
|
2 | 2 | '''helper extension to measure performance''' |
|
3 | 3 | |
|
4 | 4 | from mercurial import cmdutil, scmutil, util, commands, obsolete |
|
5 | 5 | from mercurial import repoview, branchmap, merge, copies |
|
6 | 6 | import time, os, sys |
|
7 | 7 | |
|
8 | 8 | cmdtable = {} |
|
9 | 9 | command = cmdutil.command(cmdtable) |
|
10 | 10 | |
|
def timer(func, title=None):
    """Repeatedly call func and report the best timing to stderr.

    Loops until at least 3 seconds and 100 calls have elapsed, or until
    10 seconds with at least 3 calls, then prints the fastest
    wall/combined/user/system times observed.
    """
    results = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        # (wall clock, user-CPU delta, system-CPU delta) for this call
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    if r:
        # show the last return value so the caller can sanity-check it
        sys.stderr.write("! result: %s\n" % r)
    m = min(results)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (m[0], m[1] + m[2], m[1], m[2], count))
|
35 | 35 | |
|
@command('perfwalk')
def perfwalk(ui, repo, *pats):
    """Time walking the dirstate, falling back across older hg APIs."""
    try:
        m = scmutil.match(repo[None], pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
    except Exception:
        try:
            # older API: dirstate.statwalk
            m = scmutil.match(repo[None], pats, {})
            timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
        except Exception:
            # oldest API: cmdutil.walk
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
|
47 | 47 | |
|
@command('perfannotate')
def perfannotate(ui, repo, f):
    """Time annotating file f at the working directory's first parent."""
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))
|
52 | 52 | |
|
@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')])
def perfstatus(ui, repo, **opts):
    """Time repo.status(); with --unknown, also look for unknown files."""
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer(lambda: sum(map(len, repo.status(**opts))))
|
61 | 61 | |
|
@command('perfaddremove')
def perfaddremove(ui, repo):
    """Time a dry-run addremove pass over the working directory."""
    # Save the quiet flag *before* entering the try block: if it were
    # assigned inside and that line raised, the finally clause would
    # fail with NameError and mask the real error.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True  # suppress per-file output while timing
    try:
        timer(lambda: scmutil.addremove(repo, dry_run=True))
    finally:
        repo.ui.quiet = oldquiet
|
70 | 70 | |
|
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        # modern revlogs expose an explicit cache-reset entry point
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        # older revlogs: reset the node->rev lookup cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
|
79 | 79 | |
|
@command('perfheads')
def perfheads(ui, repo):
    """Time computing the changelog head revisions with cold caches."""
    cl = repo.changelog
    def d():
        len(cl.headrevs())
    clearcaches(cl)
    timer(d)
|
87 | 87 | |
|
@command('perftags')
def perftags(ui, repo):
    """Time reading tags with changelog/manifest/tag caches dropped."""
    import mercurial.changelog, mercurial.manifest
    def t():
        # re-create changelog and manifest so every run starts cold
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.manifest = mercurial.manifest.manifest(repo.sopener)
        repo._tags = None  # invalidate the in-memory tag cache
        return len(repo.tags())
    timer(t)
|
97 | 97 | |
|
@command('perfancestors')
def perfancestors(ui, repo):
    """Time iterating over all ancestors of the current heads."""
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
|
105 | 105 | |
|
@command('perfancestorset')
def perfancestorset(ui, repo, revset):
    """Time membership tests of REVSET revisions in the heads' ancestors."""
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # lazy membership test drives the ancestor walk
    timer(d)
|
115 | 115 | |
|
@command('perfdirs')
def perfdirs(ui, repo):
    """Time computing the dirstate's directory map."""
    dirstate = repo.dirstate
    'a' in dirstate  # force the dirstate to be loaded before timing
    def d():
        dirstate.dirs()
        del dirstate._dirs  # drop the cached map so each run recomputes it
    timer(d)
|
124 | 124 | |
|
@command('perfdirstate')
def perfdirstate(ui, repo):
    """Time loading the dirstate from disk."""
    "a" in repo.dirstate  # warm lazy imports before timing
    def d():
        repo.dirstate.invalidate()  # force a re-read on the next access
        "a" in repo.dirstate
    timer(d)
|
132 | 132 | |
|
@command('perfdirstatedirs')
def perfdirstatedirs(ui, repo):
    """Time computing dirstate._dirs with an already-loaded dirstate."""
    "a" in repo.dirstate  # load the dirstate once, outside the timed loop
    def d():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs  # recompute on every iteration
    timer(d)
|
140 | 140 | |
|
@command('perfdirstatewrite')
def perfdirstatewrite(ui, repo):
    """Time writing the dirstate back to disk."""
    ds = repo.dirstate
    "a" in ds  # make sure the dirstate is loaded
    def d():
        ds._dirty = True  # force write() to actually serialize
        ds.write()
    timer(d)
|
149 | 149 | |
|
@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')])
def perfmergecalculate(ui, repo, rev):
    """Time calculating the merge/update actions against REV."""
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False,
                               acceptremote=True)
    timer(d)
|
165 | 165 | |
|
@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2):
    """Time tracing path copies between two revisions."""
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
|
173 | 173 | |
|
@command('perfmanifest')
def perfmanifest(ui, repo):
    """Time reading the tip manifest with cold caches."""
    def d():
        t = repo.manifest.tip()
        repo.manifest.read(t)
        # drop both caches so every iteration performs a cold read
        repo.manifest.mapcache = None
        repo.manifest._cache = None
    timer(d)
|
182 | 182 | |
|
@command('perfchangeset')
def perfchangeset(ui, repo, rev):
    """Time reading the changelog entry for REV."""
    n = repo[rev].node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
|
190 | 190 | |
|
@command('perfindex')
def perfindex(ui, repo):
    """Time parsing the changelog index (revlog construction + lookup)."""
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo["tip"].node()
    def d():
        # build a fresh revlog each time so the index is re-parsed
        cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
        cl.rev(n)
    timer(d)
|
200 | 200 | |
|
@command('perfstartup')
def perfstartup(ui, repo):
    """Time bare hg startup by spawning 'hg version -q'."""
    cmd = sys.argv[0]
    def d():
        # empty HGRCPATH avoids timing the user's extensions/config
        os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
    timer(d)
|
207 | 207 | |
|
@command('perfparents')
def perfparents(ui, repo):
    """Time parent lookups for the first 1000 changelog nodes."""
    nl = [repo.changelog.node(i) for i in xrange(1000)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
|
215 | 215 | |
|
@command('perflookup')
def perflookup(ui, repo, rev):
    """Time resolving REV to a node."""
    timer(lambda: len(repo.lookup(rev)))
|
219 | 219 | |
|
@command('perfrevrange')
def perfrevrange(ui, repo, *specs):
    """Time parsing and resolving revision range specs."""
    revrange = scmutil.revrange  # hoist the attribute lookup out of the loop
    timer(lambda: len(revrange(repo, specs)))
|
224 | 224 | |
|
@command('perfnodelookup')
def perfnodelookup(ui, repo, rev):
    """Time node->rev lookup in a freshly loaded changelog."""
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[rev].node()
    cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)  # keep every lookup cold
    timer(d)
|
235 | 235 | |
|
@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')])
def perflog(ui, repo, **opts):
    """Time 'hg log' over the whole repo (output discarded)."""
    ui.pushbuffer()  # swallow the log output so I/O isn't measured
    timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
                               copies=opts.get('rename')))
    ui.popbuffer()
|
243 | 243 | |
|
@command('perftemplating')
def perftemplating(ui, repo):
    """Time 'hg log' with a non-trivial template (output discarded)."""
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
                               template='{date|shortdate} [{rev}:{node|short}]'
                               ' {author|person}: {desc|firstline}\n'))
    ui.popbuffer()
|
251 | 251 | |
|
@command('perfcca')
def perfcca(ui, repo):
    """Time constructing a case-collision auditor."""
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
|
255 | 255 | |
|
@command('perffncacheload')
def perffncacheload(ui, repo):
    """Time loading the fncache from disk."""
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
|
262 | 262 | |
|
@command('perffncachewrite')
def perffncachewrite(ui, repo):
    """Time writing the fncache back to disk."""
    s = repo.store
    s.fncache._load()  # load once up front; only the write is timed
    def d():
        s.fncache._dirty = True  # force write() to actually serialize
        s.fncache.write()
    timer(d)
|
271 | 271 | |
|
@command('perffncacheencode')
def perffncacheencode(ui, repo):
    """Time path-encoding every entry of the fncache."""
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
|
280 | 280 | |
|
@command('perfdiffwd')
def perfdiffwd(ui, repo):
    """Profile diff of working directory changes"""
    # map single-letter flags to the diff option they enable
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], '1') for c in diffopt)
        def d():
            ui.pushbuffer()  # discard the diff output
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
|
298 | 298 | |
|
@command('perfrevlog',
         [('d', 'dist', 100, 'distance between the revisions')],
         "[INDEXFILE]")
def perfrevlog(ui, repo, file_, **opts):
    """Time reconstructing every dist-th revision of INDEXFILE."""
    from mercurial import revlog
    dist = opts['dist']
    def d():
        r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
        for x in xrange(0, len(r), dist):
            r.revision(r.node(x))

    timer(d)
|
311 | 311 | |
|
@command('perfrevset',
         [('C', 'clear', False, 'clear volatile cache between each call.')],
         "REVSET")
def perfrevset(ui, repo, expr, clear=False):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set cache on the revset execution. The volatile cache
    holds filtered and obsolescence-related caches."""
    def d():
        if clear:
            repo.invalidatevolatilesets()
        repo.revs(expr)
    timer(d)
|
326 | 326 | |
|
@command('perfvolatilesets')
def perfvolatilesets(ui, repo, *names):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    repo = repo.unfiltered()

    def getobs(name):
        # build a timer body that recomputes the named obsolescence set cold
        def d():
            repo.invalidatevolatilesets()
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a timer body that recomputes the named filtered-rev set cold
        def d():
            repo.invalidatevolatilesets()
            repoview.filteredrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
|
359 | 359 | |
|
@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
         ])
def perfbranchmap(ui, repo, full=False):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = repoview.subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    # add unfiltered
    allfilters.append(None)
    oldread = branchmap.read
    oldwrite = branchmap.branchcache.write
    try:
        branchmap.read = lambda repo: None
        # Fix: patch branchcache.write — the attribute that was saved above
        # and is restored below. The old code assigned to the *module*
        # (branchmap.write), so on-disk cache writes stayed enabled and
        # polluted the benchmark.
        branchmap.branchcache.write = lambda repo: None
        for name in allfilters:
            timer(getbranchmap(name), title=str(name))
    finally:
        branchmap.read = oldread
        branchmap.branchcache.write = oldwrite
@@ -1,67 +1,67 b'' | |||
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | |
|
3 | 3 | from mercurial import demandimport |
|
4 | 4 | demandimport.enable() |
|
5 | 5 | |
|
6 |
import |
|
|
6 | import sys | |
|
7 | 7 | from mercurial.i18n import _ |
|
8 | 8 | from mercurial import simplemerge, fancyopts, util, ui |
|
9 | 9 | |
|
10 | 10 | options = [('L', 'label', [], _('labels to use on conflict markers')), |
|
11 | 11 | ('a', 'text', None, _('treat all files as text')), |
|
12 | 12 | ('p', 'print', None, |
|
13 | 13 | _('print results instead of overwriting LOCAL')), |
|
14 | 14 | ('', 'no-minimal', None, |
|
15 | 15 | _('do not try to minimize conflict regions')), |
|
16 | 16 | ('h', 'help', None, _('display help and exit')), |
|
17 | 17 | ('q', 'quiet', None, _('suppress output'))] |
|
18 | 18 | |
|
19 | 19 | usage = _('''simplemerge [OPTS] LOCAL BASE OTHER |
|
20 | 20 | |
|
21 | 21 | Simple three-way file merge utility with a minimal feature set. |
|
22 | 22 | |
|
23 | 23 | Apply to LOCAL the changes necessary to go from BASE to OTHER. |
|
24 | 24 | |
|
25 | 25 | By default, LOCAL is overwritten with the results of this operation. |
|
26 | 26 | ''') |
|
27 | 27 | |
|
28 | 28 | class ParseError(Exception): |
|
29 | 29 | """Exception raised on errors in parsing the command line.""" |
|
30 | 30 | |
|
def showhelp():
    """Print the usage string and a column-aligned option table to stdout."""
    sys.stdout.write(usage)
    sys.stdout.write('\noptions:\n')

    # render each option as (flag-spec, description)
    formatted = []
    for shortopt, longopt, default, desc in options:
        flag = '%2s%s' % (shortopt and '-%s' % shortopt,
                          longopt and ' --%s' % longopt)
        formatted.append((flag, '%s' % desc))
    width = max(len(flag) for flag, _ in formatted)
    for flag, desc in formatted:
        sys.stdout.write(' %-*s %s\n' % (width, flag, desc))
|
43 | 43 | |
|
# Command-line entry point (Python 2 era ``except Exc, e`` syntax).
try:
    # merged output may be binary; don't let the platform translate newlines
    for fp in (sys.stdin, sys.stdout, sys.stderr):
        util.setbinary(fp)

    opts = {}
    try:
        args = fancyopts.fancyopts(sys.argv[1:], options, opts)
    except fancyopts.getopt.GetoptError, e:
        raise ParseError(e)
    if opts['help']:
        showhelp()
        sys.exit(0)
    if len(args) != 3:
        raise ParseError(_('wrong number of arguments'))
    # exit status is simplemerge's result (0 = clean merge)
    sys.exit(simplemerge.simplemerge(ui.ui(), *args, **opts))
except ParseError, e:
    sys.stdout.write("%s: %s\n" % (sys.argv[0], e))
    showhelp()
    sys.exit(1)
except util.Abort, e:
    sys.stderr.write("abort: %s\n" % e)
    sys.exit(255)
except KeyboardInterrupt:
    sys.exit(255)
@@ -1,1110 +1,1109 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $ |
|
3 | 3 | # Author: Engelbert Gruber <grubert@users.sourceforge.net> |
|
4 | 4 | # Copyright: This module is put into the public domain. |
|
5 | 5 | |
|
6 | 6 | """ |
|
7 | 7 | Simple man page writer for reStructuredText. |
|
8 | 8 | |
|
9 | 9 | Man pages (short for "manual pages") contain system documentation on unix-like |
|
10 | 10 | systems. The pages are grouped in numbered sections: |
|
11 | 11 | |
|
12 | 12 | 1 executable programs and shell commands |
|
13 | 13 | 2 system calls |
|
14 | 14 | 3 library functions |
|
15 | 15 | 4 special files |
|
16 | 16 | 5 file formats |
|
17 | 17 | 6 games |
|
18 | 18 | 7 miscellaneous |
|
19 | 19 | 8 system administration |
|
20 | 20 | |
|
21 | 21 | Man pages are written in *troff*, a text file formatting system.
|
22 | 22 | |
|
23 | 23 | See http://www.tldp.org/HOWTO/Man-Page for a start. |
|
24 | 24 | |
|
25 | 25 | Man pages have no subsections, only parts.
|
26 | 26 | Standard parts |
|
27 | 27 | |
|
28 | 28 | NAME , |
|
29 | 29 | SYNOPSIS , |
|
30 | 30 | DESCRIPTION , |
|
31 | 31 | OPTIONS , |
|
32 | 32 | FILES , |
|
33 | 33 | SEE ALSO , |
|
34 | 34 | BUGS , |
|
35 | 35 | |
|
36 | 36 | and |
|
37 | 37 | |
|
38 | 38 | AUTHOR . |
|
39 | 39 | |
|
40 | 40 | A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
|
41 | 41 | by the command whatis or apropos. |
|
42 | 42 | |
|
43 | 43 | """ |
|
44 | 44 | |
|
45 | 45 | __docformat__ = 'reStructuredText' |
|
46 | 46 | |
|
47 | 47 | import re |
|
48 | 48 | |
|
49 | 49 | from docutils import nodes, writers, languages |
|
50 | 50 | try: |
|
51 | 51 | import roman |
|
52 | 52 | except ImportError: |
|
53 | 53 | from docutils.utils import roman |
|
54 | 54 | import inspect |
|
55 | 55 | |
|
56 | 56 | FIELD_LIST_INDENT = 7 |
|
57 | 57 | DEFINITION_LIST_INDENT = 7 |
|
58 | 58 | OPTION_LIST_INDENT = 7 |
|
59 | 59 | BLOCKQOUTE_INDENT = 3.5 |
|
60 | 60 | |
|
61 | 61 | # Define two macros so man/roff can calculate the |
|
62 | 62 | # indent/unindent margins by itself |
|
63 | 63 | MACRO_DEF = (r""". |
|
64 | 64 | .nr rst2man-indent-level 0 |
|
65 | 65 | . |
|
66 | 66 | .de1 rstReportMargin |
|
67 | 67 | \\$1 \\n[an-margin] |
|
68 | 68 | level \\n[rst2man-indent-level] |
|
69 | 69 | level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
70 | 70 | - |
|
71 | 71 | \\n[rst2man-indent0] |
|
72 | 72 | \\n[rst2man-indent1] |
|
73 | 73 | \\n[rst2man-indent2] |
|
74 | 74 | .. |
|
75 | 75 | .de1 INDENT |
|
76 | 76 | .\" .rstReportMargin pre: |
|
77 | 77 | . RS \\$1 |
|
78 | 78 | . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] |
|
79 | 79 | . nr rst2man-indent-level +1 |
|
80 | 80 | .\" .rstReportMargin post: |
|
81 | 81 | .. |
|
82 | 82 | .de UNINDENT |
|
83 | 83 | . RE |
|
84 | 84 | .\" indent \\n[an-margin] |
|
85 | 85 | .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
86 | 86 | .nr rst2man-indent-level -1 |
|
87 | 87 | .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
88 | 88 | .in \\n[rst2man-indent\\n[rst2man-indent-level]]u |
|
89 | 89 | .. |
|
90 | 90 | """) |
|
91 | 91 | |
|
class Writer(writers.Writer):

    # NOTE(review): ('manpage') is a plain string, not a 1-tuple — a trailing
    # comma may have been intended; confirm against docutils' expectations.
    supported = ('manpage')
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # the Translator visitor does the actual reST -> troff conversion
        self.translator_class = Translator

    def translate(self):
        # walk the document tree once, collecting troff output in the visitor
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
|
108 | 108 | |
|
109 | 109 | |
|
class Table(object):
    """Accumulate rows of cells and render them as a troff table (.TS/.TE)."""

    def __init__(self):
        self._rows = []
        self._options = ['center']
        self._tab_char = '\t'
        self._coldefs = []

    def new_row(self):
        self._rows.append([])

    def append_separator(self, separator):
        """Append the separator for table head."""
        self._rows.append([separator])

    def append_cell(self, cell_lines):
        """cell_lines is an array of lines"""
        # drop a leading ``.sp`` request — spacing is handled by the table
        start = 1 if (len(cell_lines) > 0 and cell_lines[0] == '.sp\n') else 0
        self._rows[-1].append(cell_lines[start:])
        if len(self._coldefs) < len(self._rows[-1]):
            self._coldefs.append('l')

    def _minimize_cell(self, cell_lines):
        """Remove leading and trailing blank and ``.sp`` lines"""
        while cell_lines and cell_lines[0] in ('\n', '.sp\n'):
            del cell_lines[0]
        while cell_lines and cell_lines[-1] in ('\n', '.sp\n'):
            del cell_lines[-1]

    def as_list(self):
        # header: .TS, options line, column definitions
        out = ['.TS\n',
               ' '.join(self._options) + ';\n',
               '|%s|.\n' % '|'.join(self._coldefs)]
        for row in self._rows:
            # row = array of cells. cell = array of lines.
            out.append('_\n')          # horizontal rule above the row
            out.append('T{\n')
            for idx, cell in enumerate(row):
                self._minimize_cell(cell)
                out.extend(cell)
                if not out[-1].endswith('\n'):
                    out[-1] += '\n'
                if idx < len(row) - 1:
                    # tab-separated text blocks between cells
                    out.append('T}' + self._tab_char + 'T{\n')
                else:
                    out.append('T}\n')
        out.append('_\n')
        out.append('.TE\n')
        return out
|
156 | 156 | |
|
157 | 157 | class Translator(nodes.NodeVisitor): |
|
158 | 158 | """""" |
|
159 | 159 | |
|
160 | 160 | words_and_spaces = re.compile(r'\S+| +|\n') |
|
161 | 161 | document_start = """Man page generated from reStructuredText.""" |
|
162 | 162 | |
|
    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        # docutils changed get_language()'s arity across versions; adapt
        arglen = len(inspect.getargspec(languages.get_language)[0])
        if arglen == 2:
            self.language = languages.get_language(lcode,
                                                   self.document.reporter)
        else:
            self.language = languages.get_language(lcode)
        self.head = []
        self.body = []
        self.foot = []
        self.section_level = 0
        self.context = []
        self.topic_class = ''
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        # the list style "*" bullet or "#" numbered
        self._list_char = []
        # writing the header .TH and .SH NAME is postponed until after
        # docinfo.
        self._docinfo = {
                "title" : "", "title_upper": "",
                "subtitle" : "",
                "manual_section" : "", "manual_group" : "",
                "author" : [],
                "date" : "",
                "copyright" : "",
                "version" : "",
                    }
        self._docinfo_keys = []     # a list to keep the sequence as in source.
        self._docinfo_names = {}    # to get name from text not normalized.
        self._in_docinfo = None
        self._active_table = None
        self._in_literal = False
        self.header_written = 0
        self._line_block = 0
        self.authors = []
        self.section_level = 0
        self._indent = [0]
        # central definition of simple processing rules
        # what to output on : visit, depart
        # Do not use paragraph requests ``.PP`` because these set indentation.
        # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
        #
        # Fonts are put on a stack, the top one is used.
        # ``.ft P`` or ``\\fP`` pop from stack.
        # ``B`` bold, ``I`` italic, ``R`` roman should be available.
        # Hopefully ``C`` courier too.
        self.defs = {
                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
                'definition_list_item' : ('.TP', ''),
                'field_name' : ('.TP\n.B ', '\n'),
                'literal' : ('\\fB', '\\fP'),
                'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),

                'option_list_item' : ('.TP\n', ''),

                'reference' : (r'\%', r'\:'),
                'emphasis': ('\\fI', '\\fP'),
                'strong' : ('\\fB', '\\fP'),
                'term' : ('\n.B ', '\n'),
                'title_reference' : ('\\fI', '\\fP'),

                'topic-title' : ('.SS ',),
                'sidebar-title' : ('.SS ',),

                'problematic' : ('\n.nf\n', '\n.fi\n'),
                    }
        # NOTE don't specify the newline before a dot-command, but ensure
        # it is there.
|
236 | 236 | |
|
    def comment_begin(self, text):
        """Return commented version of the passed text WITHOUT end of
        line/comment."""
        prefix = '.\\" '  # troff comment request
        out_text = ''.join(
            [(prefix + in_line + '\n')
             for in_line in text.split('\n')])
        return out_text
|
245 | 245 | |
|
    def comment(self, text):
        """Return commented version of the passed text."""
        # the trailing '.\n' closes the comment with a troff no-op line
        return self.comment_begin(text)+'.\n'
|
249 | 249 | |
|
    def ensure_eol(self):
        """Ensure the last line in body is terminated by new line."""
        if self.body[-1][-1] != '\n':
            self.body.append('\n')
|
254 | 254 | |
|
    def astext(self):
        """Return the final formatted document as a string."""
        if not self.header_written:
            # ensure we get a ".TH" as viewers require it.
            self.head.append(self.header())
        # filter body
        for i in xrange(len(self.body) - 1, 0, -1):
            # remove superfluous vertical gaps.
            if self.body[i] == '.sp\n':
                # after .BI/.IP requests, a gap is redundant
                if self.body[i - 1][:4] in ('.BI ','.IP '):
                    self.body[i] = '.\n'
                # after a .TP label line
                elif (self.body[i - 1][:3] == '.B ' and
                    self.body[i - 2][:4] == '.TP\n'):
                    self.body[i] = '.\n'
                elif (self.body[i - 1] == '\n' and
                    self.body[i - 2][0] != '.' and
                    (self.body[i - 3][:7] == '.TP\n.B '
                        or self.body[i - 3][:4] == '\n.B ')
                    ):
                    self.body[i] = '.\n'
        return ''.join(self.head + self.body + self.foot)
|
276 | 276 | |
|
    def deunicode(self, text):
        # translate characters troff cannot take as raw unicode
        text = text.replace(u'\xa0', '\\ ')      # no-break space
        text = text.replace(u'\u2020', '\\(dg')  # dagger
        return text
|
281 | 281 | |
|
    def visit_Text(self, node):
        text = node.astext()
        # escape backslashes first, before introducing troff escapes below
        text = text.replace('\\','\\e')
        replace_pairs = [
            (u'-', ur'\-'),
            (u'\'', ur'\(aq'),
            # U+00B4 acute accent (character was mojibake in the source;
            # written as an escape here to stay encoding-safe)
            (u'\xb4', ur'\''),
            (u'`', ur'\(ga'),
            ]
        for (in_char, out_markup) in replace_pairs:
            text = text.replace(in_char, out_markup)
        # unicode
        text = self.deunicode(text)
        if self._in_literal:
            # prevent interpretation of "." at line start
            if text[0] == '.':
                text = '\\&' + text
            text = text.replace('\n.', '\n\\&.')
        self.body.append(text)
|
301 | 301 | |
|
    def depart_Text(self, node):
        # nothing to close; the text was emitted in visit_Text
        pass
|
304 | 304 | |
|
    def list_start(self, node):
        """Push enumeration state for a new (possibly nested) list and indent."""
        class enum_char(object):
            # fixed markers for unnumbered styles
            enum_style = {
                    'bullet'     : '\\(bu',
                    'emdash'     : '\\(em',
                     }

            def __init__(self, style):
                self._style = style
                if 'start' in node:
                    self._cnt = node['start'] - 1
                else:
                    self._cnt = 0
                self._indent = 2
                if style == 'arabic':
                    # indentation depends on number of childrens
                    # and start value.
                    self._indent = len(str(len(node.children)))
                    self._indent += len(str(self._cnt)) + 1
                elif style == 'loweralpha':
                    self._cnt += ord('a') - 1
                    self._indent = 3
                elif style == 'upperalpha':
                    self._cnt += ord('A') - 1
                    self._indent = 3
                elif style.endswith('roman'):
                    self._indent = 5

            def next(self):
                # return the marker for the next item, advancing the counter
                if self._style == 'bullet':
                    return self.enum_style[self._style]
                elif self._style == 'emdash':
                    return self.enum_style[self._style]
                self._cnt += 1
                # TODO add prefix postfix
                if self._style == 'arabic':
                    return "%d." % self._cnt
                elif self._style in ('loweralpha', 'upperalpha'):
                    return "%c." % self._cnt
                elif self._style.endswith('roman'):
                    res = roman.toRoman(self._cnt) + '.'
                    if self._style.startswith('upper'):
                        return res.upper()
                    return res.lower()
                else:
                    return "%d." % self._cnt

            def get_width(self):
                return self._indent

            def __repr__(self):
                return 'enum_style-%s' % list(self._style)

        if 'enumtype' in node:
            self._list_char.append(enum_char(node['enumtype']))
        else:
            self._list_char.append(enum_char('bullet'))
        if len(self._list_char) > 1:
            # indent nested lists
            self.indent(self._list_char[-2].get_width())
        else:
            self.indent(self._list_char[-1].get_width())
|
365 | 365 | |
|
366 | 366 | def list_end(self): |
|
367 | 367 | self.dedent() |
|
368 | 368 | self._list_char.pop() |
|
369 | 369 | |
|
370 | 370 | def header(self): |
|
371 | 371 | tmpl = (".TH %(title_upper)s %(manual_section)s" |
|
372 | 372 | " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" |
|
373 | 373 | ".SH NAME\n" |
|
374 | 374 | "%(title)s \- %(subtitle)s\n") |
|
375 | 375 | return tmpl % self._docinfo |
|
376 | 376 | |
|
377 | 377 | def append_header(self): |
|
378 | 378 | """append header with .TH and .SH NAME""" |
|
379 | 379 | # NOTE before everything |
|
380 | 380 | # .TH title_upper section date source manual |
|
381 | 381 | if self.header_written: |
|
382 | 382 | return |
|
383 | 383 | self.body.append(self.header()) |
|
384 | 384 | self.body.append(MACRO_DEF) |
|
385 | 385 | self.header_written = 1 |
|
386 | 386 | |
|
387 | 387 | def visit_address(self, node): |
|
388 | 388 | self.visit_docinfo_item(node, 'address') |
|
389 | 389 | |
|
390 | 390 | def depart_address(self, node): |
|
391 | 391 | pass |
|
392 | 392 | |
|
393 | 393 | def visit_admonition(self, node, name=None): |
|
394 | 394 | if name: |
|
395 | 395 | self.body.append('.IP %s\n' % |
|
396 | 396 | self.language.labels.get(name, name)) |
|
397 | 397 | |
|
398 | 398 | def depart_admonition(self, node): |
|
399 | 399 | self.body.append('.RE\n') |
|
400 | 400 | |
|
401 | 401 | def visit_attention(self, node): |
|
402 | 402 | self.visit_admonition(node, 'attention') |
|
403 | 403 | |
|
404 | 404 | depart_attention = depart_admonition |
|
405 | 405 | |
|
406 | 406 | def visit_docinfo_item(self, node, name): |
|
407 | 407 | if name == 'author': |
|
408 | 408 | self._docinfo[name].append(node.astext()) |
|
409 | 409 | else: |
|
410 | 410 | self._docinfo[name] = node.astext() |
|
411 | 411 | self._docinfo_keys.append(name) |
|
412 | 412 | raise nodes.SkipNode |
|
413 | 413 | |
|
414 | 414 | def depart_docinfo_item(self, node): |
|
415 | 415 | pass |
|
416 | 416 | |
|
417 | 417 | def visit_author(self, node): |
|
418 | 418 | self.visit_docinfo_item(node, 'author') |
|
419 | 419 | |
|
420 | 420 | depart_author = depart_docinfo_item |
|
421 | 421 | |
|
422 | 422 | def visit_authors(self, node): |
|
423 | 423 | # _author is called anyway. |
|
424 | 424 | pass |
|
425 | 425 | |
|
426 | 426 | def depart_authors(self, node): |
|
427 | 427 | pass |
|
428 | 428 | |
|
429 | 429 | def visit_block_quote(self, node): |
|
430 | 430 | # BUG/HACK: indent alway uses the _last_ indention, |
|
431 | 431 | # thus we need two of them. |
|
432 | 432 | self.indent(BLOCKQOUTE_INDENT) |
|
433 | 433 | self.indent(0) |
|
434 | 434 | |
|
435 | 435 | def depart_block_quote(self, node): |
|
436 | 436 | self.dedent() |
|
437 | 437 | self.dedent() |
|
438 | 438 | |
|
439 | 439 | def visit_bullet_list(self, node): |
|
440 | 440 | self.list_start(node) |
|
441 | 441 | |
|
442 | 442 | def depart_bullet_list(self, node): |
|
443 | 443 | self.list_end() |
|
444 | 444 | |
|
445 | 445 | def visit_caption(self, node): |
|
446 | 446 | pass |
|
447 | 447 | |
|
448 | 448 | def depart_caption(self, node): |
|
449 | 449 | pass |
|
450 | 450 | |
|
451 | 451 | def visit_caution(self, node): |
|
452 | 452 | self.visit_admonition(node, 'caution') |
|
453 | 453 | |
|
454 | 454 | depart_caution = depart_admonition |
|
455 | 455 | |
|
456 | 456 | def visit_citation(self, node): |
|
457 | 457 | num, text = node.astext().split(None, 1) |
|
458 | 458 | num = num.strip() |
|
459 | 459 | self.body.append('.IP [%s] 5\n' % num) |
|
460 | 460 | |
|
461 | 461 | def depart_citation(self, node): |
|
462 | 462 | pass |
|
463 | 463 | |
|
464 | 464 | def visit_citation_reference(self, node): |
|
465 | 465 | self.body.append('['+node.astext()+']') |
|
466 | 466 | raise nodes.SkipNode |
|
467 | 467 | |
|
468 | 468 | def visit_classifier(self, node): |
|
469 | 469 | pass |
|
470 | 470 | |
|
471 | 471 | def depart_classifier(self, node): |
|
472 | 472 | pass |
|
473 | 473 | |
|
474 | 474 | def visit_colspec(self, node): |
|
475 | 475 | self.colspecs.append(node) |
|
476 | 476 | |
|
477 | 477 | def depart_colspec(self, node): |
|
478 | 478 | pass |
|
479 | 479 | |
|
480 | 480 | def write_colspecs(self): |
|
481 | 481 | self.body.append("%s.\n" % ('L '*len(self.colspecs))) |
|
482 | 482 | |
|
483 | 483 | def visit_comment(self, node, |
|
484 | 484 | sub=re.compile('-(?=-)').sub): |
|
485 | 485 | self.body.append(self.comment(node.astext())) |
|
486 | 486 | raise nodes.SkipNode |
|
487 | 487 | |
|
488 | 488 | def visit_contact(self, node): |
|
489 | 489 | self.visit_docinfo_item(node, 'contact') |
|
490 | 490 | |
|
491 | 491 | depart_contact = depart_docinfo_item |
|
492 | 492 | |
|
493 | 493 | def visit_container(self, node): |
|
494 | 494 | pass |
|
495 | 495 | |
|
496 | 496 | def depart_container(self, node): |
|
497 | 497 | pass |
|
498 | 498 | |
|
499 | 499 | def visit_compound(self, node): |
|
500 | 500 | pass |
|
501 | 501 | |
|
502 | 502 | def depart_compound(self, node): |
|
503 | 503 | pass |
|
504 | 504 | |
|
505 | 505 | def visit_copyright(self, node): |
|
506 | 506 | self.visit_docinfo_item(node, 'copyright') |
|
507 | 507 | |
|
508 | 508 | def visit_danger(self, node): |
|
509 | 509 | self.visit_admonition(node, 'danger') |
|
510 | 510 | |
|
511 | 511 | depart_danger = depart_admonition |
|
512 | 512 | |
|
513 | 513 | def visit_date(self, node): |
|
514 | 514 | self.visit_docinfo_item(node, 'date') |
|
515 | 515 | |
|
516 | 516 | def visit_decoration(self, node): |
|
517 | 517 | pass |
|
518 | 518 | |
|
519 | 519 | def depart_decoration(self, node): |
|
520 | 520 | pass |
|
521 | 521 | |
|
522 | 522 | def visit_definition(self, node): |
|
523 | 523 | pass |
|
524 | 524 | |
|
525 | 525 | def depart_definition(self, node): |
|
526 | 526 | pass |
|
527 | 527 | |
|
528 | 528 | def visit_definition_list(self, node): |
|
529 | 529 | self.indent(DEFINITION_LIST_INDENT) |
|
530 | 530 | |
|
531 | 531 | def depart_definition_list(self, node): |
|
532 | 532 | self.dedent() |
|
533 | 533 | |
|
534 | 534 | def visit_definition_list_item(self, node): |
|
535 | 535 | self.body.append(self.defs['definition_list_item'][0]) |
|
536 | 536 | |
|
537 | 537 | def depart_definition_list_item(self, node): |
|
538 | 538 | self.body.append(self.defs['definition_list_item'][1]) |
|
539 | 539 | |
|
540 | 540 | def visit_description(self, node): |
|
541 | 541 | pass |
|
542 | 542 | |
|
543 | 543 | def depart_description(self, node): |
|
544 | 544 | pass |
|
545 | 545 | |
|
546 | 546 | def visit_docinfo(self, node): |
|
547 | 547 | self._in_docinfo = 1 |
|
548 | 548 | |
|
549 | 549 | def depart_docinfo(self, node): |
|
550 | 550 | self._in_docinfo = None |
|
551 | 551 | # NOTE nothing should be written before this |
|
552 | 552 | self.append_header() |
|
553 | 553 | |
|
554 | 554 | def visit_doctest_block(self, node): |
|
555 | 555 | self.body.append(self.defs['literal_block'][0]) |
|
556 | 556 | self._in_literal = True |
|
557 | 557 | |
|
558 | 558 | def depart_doctest_block(self, node): |
|
559 | 559 | self._in_literal = False |
|
560 | 560 | self.body.append(self.defs['literal_block'][1]) |
|
561 | 561 | |
|
562 | 562 | def visit_document(self, node): |
|
563 | 563 | # no blank line between comment and header. |
|
564 | 564 | self.body.append(self.comment(self.document_start).rstrip()+'\n') |
|
565 | 565 | # writing header is postboned |
|
566 | 566 | self.header_written = 0 |
|
567 | 567 | |
|
568 | 568 | def depart_document(self, node): |
|
569 | 569 | if self._docinfo['author']: |
|
570 | 570 | self.body.append('.SH AUTHOR\n%s\n' |
|
571 | 571 | % ', '.join(self._docinfo['author'])) |
|
572 | 572 | skip = ('author', 'copyright', 'date', |
|
573 | 573 | 'manual_group', 'manual_section', |
|
574 | 574 | 'subtitle', |
|
575 | 575 | 'title', 'title_upper', 'version') |
|
576 | 576 | for name in self._docinfo_keys: |
|
577 | 577 | if name == 'address': |
|
578 | 578 | self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % ( |
|
579 | 579 | self.language.labels.get(name, name), |
|
580 | 580 | self.defs['indent'][0] % 0, |
|
581 | 581 | self.defs['indent'][0] % BLOCKQOUTE_INDENT, |
|
582 | 582 | self._docinfo[name], |
|
583 | 583 | self.defs['indent'][1], |
|
584 | 584 | self.defs['indent'][1])) |
|
585 | 585 | elif name not in skip: |
|
586 | 586 | if name in self._docinfo_names: |
|
587 | 587 | label = self._docinfo_names[name] |
|
588 | 588 | else: |
|
589 | 589 | label = self.language.labels.get(name, name) |
|
590 | 590 | self.body.append("\n%s: %s\n" % (label, self._docinfo[name])) |
|
591 | 591 | if self._docinfo['copyright']: |
|
592 | 592 | self.body.append('.SH COPYRIGHT\n%s\n' |
|
593 | 593 | % self._docinfo['copyright']) |
|
594 | 594 | self.body.append(self.comment( |
|
595 | 595 | 'Generated by docutils manpage writer.\n')) |
|
596 | 596 | |
|
597 | 597 | def visit_emphasis(self, node): |
|
598 | 598 | self.body.append(self.defs['emphasis'][0]) |
|
599 | 599 | |
|
600 | 600 | def depart_emphasis(self, node): |
|
601 | 601 | self.body.append(self.defs['emphasis'][1]) |
|
602 | 602 | |
|
603 | 603 | def visit_entry(self, node): |
|
604 | 604 | # a cell in a table row |
|
605 | 605 | if 'morerows' in node: |
|
606 | 606 | self.document.reporter.warning('"table row spanning" not supported', |
|
607 | 607 | base_node=node) |
|
608 | 608 | if 'morecols' in node: |
|
609 | 609 | self.document.reporter.warning( |
|
610 | 610 | '"table cell spanning" not supported', base_node=node) |
|
611 | 611 | self.context.append(len(self.body)) |
|
612 | 612 | |
|
613 | 613 | def depart_entry(self, node): |
|
614 | 614 | start = self.context.pop() |
|
615 | 615 | self._active_table.append_cell(self.body[start:]) |
|
616 | 616 | del self.body[start:] |
|
617 | 617 | |
|
618 | 618 | def visit_enumerated_list(self, node): |
|
619 | 619 | self.list_start(node) |
|
620 | 620 | |
|
621 | 621 | def depart_enumerated_list(self, node): |
|
622 | 622 | self.list_end() |
|
623 | 623 | |
|
624 | 624 | def visit_error(self, node): |
|
625 | 625 | self.visit_admonition(node, 'error') |
|
626 | 626 | |
|
627 | 627 | depart_error = depart_admonition |
|
628 | 628 | |
|
629 | 629 | def visit_field(self, node): |
|
630 | 630 | pass |
|
631 | 631 | |
|
632 | 632 | def depart_field(self, node): |
|
633 | 633 | pass |
|
634 | 634 | |
|
635 | 635 | def visit_field_body(self, node): |
|
636 | 636 | if self._in_docinfo: |
|
637 | 637 | name_normalized = self._field_name.lower().replace(" ","_") |
|
638 | 638 | self._docinfo_names[name_normalized] = self._field_name |
|
639 | 639 | self.visit_docinfo_item(node, name_normalized) |
|
640 | 640 | raise nodes.SkipNode |
|
641 | 641 | |
|
642 | 642 | def depart_field_body(self, node): |
|
643 | 643 | pass |
|
644 | 644 | |
|
645 | 645 | def visit_field_list(self, node): |
|
646 | 646 | self.indent(FIELD_LIST_INDENT) |
|
647 | 647 | |
|
648 | 648 | def depart_field_list(self, node): |
|
649 | 649 | self.dedent() |
|
650 | 650 | |
|
651 | 651 | def visit_field_name(self, node): |
|
652 | 652 | if self._in_docinfo: |
|
653 | 653 | self._field_name = node.astext() |
|
654 | 654 | raise nodes.SkipNode |
|
655 | 655 | else: |
|
656 | 656 | self.body.append(self.defs['field_name'][0]) |
|
657 | 657 | |
|
658 | 658 | def depart_field_name(self, node): |
|
659 | 659 | self.body.append(self.defs['field_name'][1]) |
|
660 | 660 | |
|
661 | 661 | def visit_figure(self, node): |
|
662 | 662 | self.indent(2.5) |
|
663 | 663 | self.indent(0) |
|
664 | 664 | |
|
665 | 665 | def depart_figure(self, node): |
|
666 | 666 | self.dedent() |
|
667 | 667 | self.dedent() |
|
668 | 668 | |
|
669 | 669 | def visit_footer(self, node): |
|
670 | 670 | self.document.reporter.warning('"footer" not supported', |
|
671 | 671 | base_node=node) |
|
672 | 672 | |
|
673 | 673 | def depart_footer(self, node): |
|
674 | 674 | pass |
|
675 | 675 | |
|
676 | 676 | def visit_footnote(self, node): |
|
677 | 677 | num, text = node.astext().split(None, 1) |
|
678 | 678 | num = num.strip() |
|
679 | 679 | self.body.append('.IP [%s] 5\n' % self.deunicode(num)) |
|
680 | 680 | |
|
681 | 681 | def depart_footnote(self, node): |
|
682 | 682 | pass |
|
683 | 683 | |
|
684 | 684 | def footnote_backrefs(self, node): |
|
685 | 685 | self.document.reporter.warning('"footnote_backrefs" not supported', |
|
686 | 686 | base_node=node) |
|
687 | 687 | |
|
688 | 688 | def visit_footnote_reference(self, node): |
|
689 | 689 | self.body.append('['+self.deunicode(node.astext())+']') |
|
690 | 690 | raise nodes.SkipNode |
|
691 | 691 | |
|
692 | 692 | def depart_footnote_reference(self, node): |
|
693 | 693 | pass |
|
694 | 694 | |
|
695 | 695 | def visit_generated(self, node): |
|
696 | 696 | pass |
|
697 | 697 | |
|
698 | 698 | def depart_generated(self, node): |
|
699 | 699 | pass |
|
700 | 700 | |
|
701 | 701 | def visit_header(self, node): |
|
702 | 702 | raise NotImplementedError, node.astext() |
|
703 | 703 | |
|
704 | 704 | def depart_header(self, node): |
|
705 | 705 | pass |
|
706 | 706 | |
|
707 | 707 | def visit_hint(self, node): |
|
708 | 708 | self.visit_admonition(node, 'hint') |
|
709 | 709 | |
|
710 | 710 | depart_hint = depart_admonition |
|
711 | 711 | |
|
712 | 712 | def visit_subscript(self, node): |
|
713 | 713 | self.body.append('\\s-2\\d') |
|
714 | 714 | |
|
715 | 715 | def depart_subscript(self, node): |
|
716 | 716 | self.body.append('\\u\\s0') |
|
717 | 717 | |
|
718 | 718 | def visit_superscript(self, node): |
|
719 | 719 | self.body.append('\\s-2\\u') |
|
720 | 720 | |
|
721 | 721 | def depart_superscript(self, node): |
|
722 | 722 | self.body.append('\\d\\s0') |
|
723 | 723 | |
|
724 | 724 | def visit_attribution(self, node): |
|
725 | 725 | self.body.append('\\(em ') |
|
726 | 726 | |
|
727 | 727 | def depart_attribution(self, node): |
|
728 | 728 | self.body.append('\n') |
|
729 | 729 | |
|
730 | 730 | def visit_image(self, node): |
|
731 | 731 | self.document.reporter.warning('"image" not supported', |
|
732 | 732 | base_node=node) |
|
733 | 733 | text = [] |
|
734 | 734 | if 'alt' in node.attributes: |
|
735 | 735 | text.append(node.attributes['alt']) |
|
736 | 736 | if 'uri' in node.attributes: |
|
737 | 737 | text.append(node.attributes['uri']) |
|
738 | 738 | self.body.append('[image: %s]\n' % ('/'.join(text))) |
|
739 | 739 | raise nodes.SkipNode |
|
740 | 740 | |
|
741 | 741 | def visit_important(self, node): |
|
742 | 742 | self.visit_admonition(node, 'important') |
|
743 | 743 | |
|
744 | 744 | depart_important = depart_admonition |
|
745 | 745 | |
|
746 | 746 | def visit_label(self, node): |
|
747 | 747 | # footnote and citation |
|
748 | 748 | if (isinstance(node.parent, nodes.footnote) |
|
749 | 749 | or isinstance(node.parent, nodes.citation)): |
|
750 | 750 | raise nodes.SkipNode |
|
751 | 751 | self.document.reporter.warning('"unsupported "label"', |
|
752 | 752 | base_node=node) |
|
753 | 753 | self.body.append('[') |
|
754 | 754 | |
|
755 | 755 | def depart_label(self, node): |
|
756 | 756 | self.body.append(']\n') |
|
757 | 757 | |
|
758 | 758 | def visit_legend(self, node): |
|
759 | 759 | pass |
|
760 | 760 | |
|
761 | 761 | def depart_legend(self, node): |
|
762 | 762 | pass |
|
763 | 763 | |
|
764 | 764 | # WHAT should we use .INDENT, .UNINDENT ? |
|
765 | 765 | def visit_line_block(self, node): |
|
766 | 766 | self._line_block += 1 |
|
767 | 767 | if self._line_block == 1: |
|
768 | 768 | self.body.append('.sp\n') |
|
769 | 769 | self.body.append('.nf\n') |
|
770 | 770 | else: |
|
771 | 771 | self.body.append('.in +2\n') |
|
772 | 772 | |
|
773 | 773 | def depart_line_block(self, node): |
|
774 | 774 | self._line_block -= 1 |
|
775 | 775 | if self._line_block == 0: |
|
776 | 776 | self.body.append('.fi\n') |
|
777 | 777 | self.body.append('.sp\n') |
|
778 | 778 | else: |
|
779 | 779 | self.body.append('.in -2\n') |
|
780 | 780 | |
|
781 | 781 | def visit_line(self, node): |
|
782 | 782 | pass |
|
783 | 783 | |
|
784 | 784 | def depart_line(self, node): |
|
785 | 785 | self.body.append('\n') |
|
786 | 786 | |
|
787 | 787 | def visit_list_item(self, node): |
|
788 | 788 | # man 7 man argues to use ".IP" instead of ".TP" |
|
789 | 789 | self.body.append('.IP %s %d\n' % ( |
|
790 | 790 | self._list_char[-1].next(), |
|
791 | 791 | self._list_char[-1].get_width(),)) |
|
792 | 792 | |
|
793 | 793 | def depart_list_item(self, node): |
|
794 | 794 | pass |
|
795 | 795 | |
|
796 | 796 | def visit_literal(self, node): |
|
797 | 797 | self.body.append(self.defs['literal'][0]) |
|
798 | 798 | |
|
799 | 799 | def depart_literal(self, node): |
|
800 | 800 | self.body.append(self.defs['literal'][1]) |
|
801 | 801 | |
|
802 | 802 | def visit_literal_block(self, node): |
|
803 | 803 | self.body.append(self.defs['literal_block'][0]) |
|
804 | 804 | self._in_literal = True |
|
805 | 805 | |
|
806 | 806 | def depart_literal_block(self, node): |
|
807 | 807 | self._in_literal = False |
|
808 | 808 | self.body.append(self.defs['literal_block'][1]) |
|
809 | 809 | |
|
810 | 810 | def visit_meta(self, node): |
|
811 | 811 | raise NotImplementedError, node.astext() |
|
812 | 812 | |
|
813 | 813 | def depart_meta(self, node): |
|
814 | 814 | pass |
|
815 | 815 | |
|
816 | 816 | def visit_note(self, node): |
|
817 | 817 | self.visit_admonition(node, 'note') |
|
818 | 818 | |
|
819 | 819 | depart_note = depart_admonition |
|
820 | 820 | |
|
821 | 821 | def indent(self, by=0.5): |
|
822 | 822 | # if we are in a section ".SH" there already is a .RS |
|
823 | 823 | step = self._indent[-1] |
|
824 | 824 | self._indent.append(by) |
|
825 | 825 | self.body.append(self.defs['indent'][0] % step) |
|
826 | 826 | |
|
827 | 827 | def dedent(self): |
|
828 | 828 | self._indent.pop() |
|
829 | 829 | self.body.append(self.defs['indent'][1]) |
|
830 | 830 | |
|
831 | 831 | def visit_option_list(self, node): |
|
832 | 832 | self.indent(OPTION_LIST_INDENT) |
|
833 | 833 | |
|
834 | 834 | def depart_option_list(self, node): |
|
835 | 835 | self.dedent() |
|
836 | 836 | |
|
837 | 837 | def visit_option_list_item(self, node): |
|
838 | 838 | # one item of the list |
|
839 | 839 | self.body.append(self.defs['option_list_item'][0]) |
|
840 | 840 | |
|
841 | 841 | def depart_option_list_item(self, node): |
|
842 | 842 | self.body.append(self.defs['option_list_item'][1]) |
|
843 | 843 | |
|
844 | 844 | def visit_option_group(self, node): |
|
845 | 845 | # as one option could have several forms it is a group |
|
846 | 846 | # options without parameter bold only, .B, -v |
|
847 | 847 | # options with parameter bold italic, .BI, -f file |
|
848 | 848 | # |
|
849 | 849 | # we do not know if .B or .BI |
|
850 | 850 | self.context.append('.B') # blind guess |
|
851 | 851 | self.context.append(len(self.body)) # to be able to insert later |
|
852 | 852 | self.context.append(0) # option counter |
|
853 | 853 | |
|
854 | 854 | def depart_option_group(self, node): |
|
855 | 855 | self.context.pop() # the counter |
|
856 | 856 | start_position = self.context.pop() |
|
857 | 857 | text = self.body[start_position:] |
|
858 | 858 | del self.body[start_position:] |
|
859 | 859 | self.body.append('%s%s\n' % (self.context.pop(), ''.join(text))) |
|
860 | 860 | |
|
861 | 861 | def visit_option(self, node): |
|
862 | 862 | # each form of the option will be presented separately |
|
863 | 863 | if self.context[-1] > 0: |
|
864 | 864 | self.body.append(', ') |
|
865 | 865 | if self.context[-3] == '.BI': |
|
866 | 866 | self.body.append('\\') |
|
867 | 867 | self.body.append(' ') |
|
868 | 868 | |
|
869 | 869 | def depart_option(self, node): |
|
870 | 870 | self.context[-1] += 1 |
|
871 | 871 | |
|
872 | 872 | def visit_option_string(self, node): |
|
873 | 873 | # do not know if .B or .BI |
|
874 | 874 | pass |
|
875 | 875 | |
|
876 | 876 | def depart_option_string(self, node): |
|
877 | 877 | pass |
|
878 | 878 | |
|
879 | 879 | def visit_option_argument(self, node): |
|
880 | 880 | self.context[-3] = '.BI' # bold/italic alternate |
|
881 | 881 | if node['delimiter'] != ' ': |
|
882 | 882 | self.body.append('\\fB%s ' % node['delimiter']) |
|
883 | 883 | elif self.body[len(self.body) - 1].endswith('='): |
|
884 | 884 | # a blank only means no blank in output, just changing font |
|
885 | 885 | self.body.append(' ') |
|
886 | 886 | else: |
|
887 | 887 | # blank backslash blank, switch font then a blank |
|
888 | 888 | self.body.append(' \\ ') |
|
889 | 889 | |
|
890 | 890 | def depart_option_argument(self, node): |
|
891 | 891 | pass |
|
892 | 892 | |
|
893 | 893 | def visit_organization(self, node): |
|
894 | 894 | self.visit_docinfo_item(node, 'organization') |
|
895 | 895 | |
|
896 | 896 | def depart_organization(self, node): |
|
897 | 897 | pass |
|
898 | 898 | |
|
899 | 899 | def visit_paragraph(self, node): |
|
900 | 900 | # ``.PP`` : Start standard indented paragraph. |
|
901 | 901 | # ``.LP`` : Start block paragraph, all except the first. |
|
902 | 902 | # ``.P [type]`` : Start paragraph type. |
|
903 | 903 | # NOTE don't use paragraph starts because they reset indentation. |
|
904 | 904 | # ``.sp`` is only vertical space |
|
905 | 905 | self.ensure_eol() |
|
906 | 906 | self.body.append('.sp\n') |
|
907 | 907 | |
|
908 | 908 | def depart_paragraph(self, node): |
|
909 | 909 | self.body.append('\n') |
|
910 | 910 | |
|
911 | 911 | def visit_problematic(self, node): |
|
912 | 912 | self.body.append(self.defs['problematic'][0]) |
|
913 | 913 | |
|
914 | 914 | def depart_problematic(self, node): |
|
915 | 915 | self.body.append(self.defs['problematic'][1]) |
|
916 | 916 | |
|
917 | 917 | def visit_raw(self, node): |
|
918 | 918 | if node.get('format') == 'manpage': |
|
919 | 919 | self.body.append(node.astext() + "\n") |
|
920 | 920 | # Keep non-manpage raw text out of output: |
|
921 | 921 | raise nodes.SkipNode |
|
922 | 922 | |
|
923 | 923 | def visit_reference(self, node): |
|
924 | 924 | """E.g. link or email address.""" |
|
925 | 925 | self.body.append(self.defs['reference'][0]) |
|
926 | 926 | |
|
927 | 927 | def depart_reference(self, node): |
|
928 | 928 | self.body.append(self.defs['reference'][1]) |
|
929 | 929 | |
|
930 | 930 | def visit_revision(self, node): |
|
931 | 931 | self.visit_docinfo_item(node, 'revision') |
|
932 | 932 | |
|
933 | 933 | depart_revision = depart_docinfo_item |
|
934 | 934 | |
|
935 | 935 | def visit_row(self, node): |
|
936 | 936 | self._active_table.new_row() |
|
937 | 937 | |
|
938 | 938 | def depart_row(self, node): |
|
939 | 939 | pass |
|
940 | 940 | |
|
941 | 941 | def visit_section(self, node): |
|
942 | 942 | self.section_level += 1 |
|
943 | 943 | |
|
944 | 944 | def depart_section(self, node): |
|
945 | 945 | self.section_level -= 1 |
|
946 | 946 | |
|
947 | 947 | def visit_status(self, node): |
|
948 | 948 | self.visit_docinfo_item(node, 'status') |
|
949 | 949 | |
|
950 | 950 | depart_status = depart_docinfo_item |
|
951 | 951 | |
|
952 | 952 | def visit_strong(self, node): |
|
953 | 953 | self.body.append(self.defs['strong'][0]) |
|
954 | 954 | |
|
955 | 955 | def depart_strong(self, node): |
|
956 | 956 | self.body.append(self.defs['strong'][1]) |
|
957 | 957 | |
|
958 | 958 | def visit_substitution_definition(self, node): |
|
959 | 959 | """Internal only.""" |
|
960 | 960 | raise nodes.SkipNode |
|
961 | 961 | |
|
962 | 962 | def visit_substitution_reference(self, node): |
|
963 | 963 | self.document.reporter.warning('"substitution_reference" not supported', |
|
964 | 964 | base_node=node) |
|
965 | 965 | |
|
966 | 966 | def visit_subtitle(self, node): |
|
967 | 967 | if isinstance(node.parent, nodes.sidebar): |
|
968 | 968 | self.body.append(self.defs['strong'][0]) |
|
969 | 969 | elif isinstance(node.parent, nodes.document): |
|
970 | 970 | self.visit_docinfo_item(node, 'subtitle') |
|
971 | 971 | elif isinstance(node.parent, nodes.section): |
|
972 | 972 | self.body.append(self.defs['strong'][0]) |
|
973 | 973 | |
|
974 | 974 | def depart_subtitle(self, node): |
|
975 | 975 | # document subtitle calls SkipNode |
|
976 | 976 | self.body.append(self.defs['strong'][1]+'\n.PP\n') |
|
977 | 977 | |
|
978 | 978 | def visit_system_message(self, node): |
|
979 | 979 | # TODO add report_level |
|
980 | 980 | #if node['level'] < self.document.reporter['writer'].report_level: |
|
981 | 981 | # Level is too low to display: |
|
982 | 982 | # raise nodes.SkipNode |
|
983 | 983 | attr = {} |
|
984 | backref_text = '' | |
|
985 | 984 | if node.hasattr('id'): |
|
986 | 985 | attr['name'] = node['id'] |
|
987 | 986 | if node.hasattr('line'): |
|
988 | 987 | line = ', line %s' % node['line'] |
|
989 | 988 | else: |
|
990 | 989 | line = '' |
|
991 | 990 | self.body.append('.IP "System Message: %s/%s (%s:%s)"\n' |
|
992 | 991 | % (node['type'], node['level'], node['source'], line)) |
|
993 | 992 | |
|
994 | 993 | def depart_system_message(self, node): |
|
995 | 994 | pass |
|
996 | 995 | |
|
997 | 996 | def visit_table(self, node): |
|
998 | 997 | self._active_table = Table() |
|
999 | 998 | |
|
1000 | 999 | def depart_table(self, node): |
|
1001 | 1000 | self.ensure_eol() |
|
1002 | 1001 | self.body.extend(self._active_table.as_list()) |
|
1003 | 1002 | self._active_table = None |
|
1004 | 1003 | |
|
1005 | 1004 | def visit_target(self, node): |
|
1006 | 1005 | # targets are in-document hyper targets, without any use for man-pages. |
|
1007 | 1006 | raise nodes.SkipNode |
|
1008 | 1007 | |
|
1009 | 1008 | def visit_tbody(self, node): |
|
1010 | 1009 | pass |
|
1011 | 1010 | |
|
1012 | 1011 | def depart_tbody(self, node): |
|
1013 | 1012 | pass |
|
1014 | 1013 | |
|
1015 | 1014 | def visit_term(self, node): |
|
1016 | 1015 | self.body.append(self.defs['term'][0]) |
|
1017 | 1016 | |
|
1018 | 1017 | def depart_term(self, node): |
|
1019 | 1018 | self.body.append(self.defs['term'][1]) |
|
1020 | 1019 | |
|
1021 | 1020 | def visit_tgroup(self, node): |
|
1022 | 1021 | pass |
|
1023 | 1022 | |
|
1024 | 1023 | def depart_tgroup(self, node): |
|
1025 | 1024 | pass |
|
1026 | 1025 | |
|
1027 | 1026 | def visit_thead(self, node): |
|
1028 | 1027 | # MAYBE double line '=' |
|
1029 | 1028 | pass |
|
1030 | 1029 | |
|
1031 | 1030 | def depart_thead(self, node): |
|
1032 | 1031 | # MAYBE double line '=' |
|
1033 | 1032 | pass |
|
1034 | 1033 | |
|
1035 | 1034 | def visit_tip(self, node): |
|
1036 | 1035 | self.visit_admonition(node, 'tip') |
|
1037 | 1036 | |
|
1038 | 1037 | depart_tip = depart_admonition |
|
1039 | 1038 | |
|
1040 | 1039 | def visit_title(self, node): |
|
1041 | 1040 | if isinstance(node.parent, nodes.topic): |
|
1042 | 1041 | self.body.append(self.defs['topic-title'][0]) |
|
1043 | 1042 | elif isinstance(node.parent, nodes.sidebar): |
|
1044 | 1043 | self.body.append(self.defs['sidebar-title'][0]) |
|
1045 | 1044 | elif isinstance(node.parent, nodes.admonition): |
|
1046 | 1045 | self.body.append('.IP "') |
|
1047 | 1046 | elif self.section_level == 0: |
|
1048 | 1047 | self._docinfo['title'] = node.astext() |
|
1049 | 1048 | # document title for .TH |
|
1050 | 1049 | self._docinfo['title_upper'] = node.astext().upper() |
|
1051 | 1050 | raise nodes.SkipNode |
|
1052 | 1051 | elif self.section_level == 1: |
|
1053 | 1052 | self.body.append('.SH ') |
|
1054 | 1053 | for n in node.traverse(nodes.Text): |
|
1055 | 1054 | n.parent.replace(n, nodes.Text(n.astext().upper())) |
|
1056 | 1055 | else: |
|
1057 | 1056 | self.body.append('.SS ') |
|
1058 | 1057 | |
|
1059 | 1058 | def depart_title(self, node): |
|
1060 | 1059 | if isinstance(node.parent, nodes.admonition): |
|
1061 | 1060 | self.body.append('"') |
|
1062 | 1061 | self.body.append('\n') |
|
1063 | 1062 | |
|
1064 | 1063 | def visit_title_reference(self, node): |
|
1065 | 1064 | """inline citation reference""" |
|
1066 | 1065 | self.body.append(self.defs['title_reference'][0]) |
|
1067 | 1066 | |
|
1068 | 1067 | def depart_title_reference(self, node): |
|
1069 | 1068 | self.body.append(self.defs['title_reference'][1]) |
|
1070 | 1069 | |
|
1071 | 1070 | def visit_topic(self, node): |
|
1072 | 1071 | pass |
|
1073 | 1072 | |
|
1074 | 1073 | def depart_topic(self, node): |
|
1075 | 1074 | pass |
|
1076 | 1075 | |
|
1077 | 1076 | def visit_sidebar(self, node): |
|
1078 | 1077 | pass |
|
1079 | 1078 | |
|
1080 | 1079 | def depart_sidebar(self, node): |
|
1081 | 1080 | pass |
|
1082 | 1081 | |
|
1083 | 1082 | def visit_rubric(self, node): |
|
1084 | 1083 | pass |
|
1085 | 1084 | |
|
1086 | 1085 | def depart_rubric(self, node): |
|
1087 | 1086 | pass |
|
1088 | 1087 | |
|
1089 | 1088 | def visit_transition(self, node): |
|
1090 | 1089 | # .PP Begin a new paragraph and reset prevailing indent. |
|
1091 | 1090 | # .sp N leaves N lines of blank space. |
|
1092 | 1091 | # .ce centers the next line |
|
1093 | 1092 | self.body.append('\n.sp\n.ce\n----\n') |
|
1094 | 1093 | |
|
1095 | 1094 | def depart_transition(self, node): |
|
1096 | 1095 | self.body.append('\n.ce 0\n.sp\n') |
|
1097 | 1096 | |
|
1098 | 1097 | def visit_version(self, node): |
|
1099 | 1098 | self.visit_docinfo_item(node, 'version') |
|
1100 | 1099 | |
|
1101 | 1100 | def visit_warning(self, node): |
|
1102 | 1101 | self.visit_admonition(node, 'warning') |
|
1103 | 1102 | |
|
1104 | 1103 | depart_warning = depart_admonition |
|
1105 | 1104 | |
|
1106 | 1105 | def unimplemented_visit(self, node): |
|
1107 | 1106 | raise NotImplementedError('visiting unimplemented node type: %s' |
|
1108 | 1107 | % node.__class__.__name__) |
|
1109 | 1108 | |
|
1110 | 1109 | # vim: set fileencoding=utf-8 et ts=4 ai : |
@@ -1,321 +1,321 b'' | |||
|
1 | 1 | import os, stat, socket |
|
2 | 2 | import re |
|
3 | 3 | import sys |
|
4 | 4 | import tempfile |
|
5 | 5 | |
|
6 | 6 | tempprefix = 'hg-hghave-' |
|
7 | 7 | |
|
8 | 8 | def matchoutput(cmd, regexp, ignorestatus=False): |
|
9 | 9 | """Return True if cmd executes successfully and its output |
|
10 | 10 | is matched by the supplied regular expression. |
|
11 | 11 | """ |
|
12 | 12 | r = re.compile(regexp) |
|
13 | 13 | fh = os.popen(cmd) |
|
14 | 14 | s = fh.read() |
|
15 | 15 | try: |
|
16 | 16 | ret = fh.close() |
|
17 | 17 | except IOError: |
|
18 | 18 | # Happen in Windows test environment |
|
19 | 19 | ret = 1 |
|
20 | 20 | return (ignorestatus or ret is None) and r.search(s) |
|
21 | 21 | |
|
22 | 22 | def has_baz(): |
|
23 | 23 | return matchoutput('baz --version 2>&1', r'baz Bazaar version') |
|
24 | 24 | |
|
25 | 25 | def has_bzr(): |
|
26 | 26 | try: |
|
27 | 27 | import bzrlib |
|
28 | 28 | return bzrlib.__doc__ is not None |
|
29 | 29 | except ImportError: |
|
30 | 30 | return False |
|
31 | 31 | |
|
32 | 32 | def has_bzr114(): |
|
33 | 33 | try: |
|
34 | 34 | import bzrlib |
|
35 | 35 | return (bzrlib.__doc__ is not None |
|
36 | 36 | and bzrlib.version_info[:2] >= (1, 14)) |
|
37 | 37 | except ImportError: |
|
38 | 38 | return False |
|
39 | 39 | |
|
40 | 40 | def has_cvs(): |
|
41 | 41 | re = r'Concurrent Versions System.*?server' |
|
42 | 42 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
43 | 43 | |
|
44 | 44 | def has_cvs112(): |
|
45 | 45 | re = r'Concurrent Versions System \(CVS\) 1.12.*?server' |
|
46 | 46 | return matchoutput('cvs --version 2>&1', re) and not has_msys() |
|
47 | 47 | |
|
48 | 48 | def has_darcs(): |
|
49 | 49 | return matchoutput('darcs --version', r'2\.[2-9]', True) |
|
50 | 50 | |
|
51 | 51 | def has_mtn(): |
|
52 | 52 | return matchoutput('mtn --version', r'monotone', True) and not matchoutput( |
|
53 | 53 | 'mtn --version', r'monotone 0\.', True) |
|
54 | 54 | |
|
55 | 55 | def has_eol_in_paths(): |
|
56 | 56 | try: |
|
57 | 57 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r') |
|
58 | 58 | os.close(fd) |
|
59 | 59 | os.remove(path) |
|
60 | 60 | return True |
|
61 | 61 | except (IOError, OSError): |
|
62 | 62 | return False |
|
63 | 63 | |
|
64 | 64 | def has_executablebit(): |
|
65 | 65 | try: |
|
66 | 66 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
67 | 67 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
68 | 68 | try: |
|
69 | 69 | os.close(fh) |
|
70 | 70 | m = os.stat(fn).st_mode & 0777 |
|
71 | 71 | new_file_has_exec = m & EXECFLAGS |
|
72 | 72 | os.chmod(fn, m ^ EXECFLAGS) |
|
73 | 73 | exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m) |
|
74 | 74 | finally: |
|
75 | 75 | os.unlink(fn) |
|
76 | 76 | except (IOError, OSError): |
|
77 | 77 | # we don't care, the user probably won't be able to commit anyway |
|
78 | 78 | return False |
|
79 | 79 | return not (new_file_has_exec or exec_flags_cannot_flip) |
|
80 | 80 | |
|
81 | 81 | def has_icasefs(): |
|
82 | 82 | # Stolen from mercurial.util |
|
83 | 83 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
84 | 84 | os.close(fd) |
|
85 | 85 | try: |
|
86 | 86 | s1 = os.stat(path) |
|
87 | 87 | d, b = os.path.split(path) |
|
88 | 88 | p2 = os.path.join(d, b.upper()) |
|
89 | 89 | if path == p2: |
|
90 | 90 | p2 = os.path.join(d, b.lower()) |
|
91 | 91 | try: |
|
92 | 92 | s2 = os.stat(p2) |
|
93 | 93 | return s2 == s1 |
|
94 | 94 | except OSError: |
|
95 | 95 | return False |
|
96 | 96 | finally: |
|
97 | 97 | os.remove(path) |
|
98 | 98 | |
|
99 | 99 | def has_inotify(): |
|
100 | 100 | try: |
|
101 | 101 | import hgext.inotify.linux.watcher |
|
102 | 102 | except ImportError: |
|
103 | 103 | return False |
|
104 | 104 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
105 | 105 | sock = socket.socket(socket.AF_UNIX) |
|
106 | 106 | try: |
|
107 | 107 | sock.bind(name) |
|
108 |
except socket.error |
|
|
108 | except socket.error: | |
|
109 | 109 | return False |
|
110 | 110 | sock.close() |
|
111 | 111 | os.unlink(name) |
|
112 | 112 | return True |
|
113 | 113 | |
|
114 | 114 | def has_fifo(): |
|
115 | 115 | if getattr(os, "mkfifo", None) is None: |
|
116 | 116 | return False |
|
117 | 117 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
118 | 118 | try: |
|
119 | 119 | os.mkfifo(name) |
|
120 | 120 | os.unlink(name) |
|
121 | 121 | return True |
|
122 | 122 | except OSError: |
|
123 | 123 | return False |
|
124 | 124 | |
|
125 | 125 | def has_killdaemons(): |
|
126 | 126 | return True |
|
127 | 127 | |
|
128 | 128 | def has_cacheable_fs(): |
|
129 | 129 | from mercurial import util |
|
130 | 130 | |
|
131 | 131 | fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
132 | 132 | os.close(fd) |
|
133 | 133 | try: |
|
134 | 134 | return util.cachestat(path).cacheable() |
|
135 | 135 | finally: |
|
136 | 136 | os.remove(path) |
|
137 | 137 | |
|
138 | 138 | def has_lsprof(): |
|
139 | 139 | try: |
|
140 | 140 | import _lsprof |
|
141 | 141 | return True |
|
142 | 142 | except ImportError: |
|
143 | 143 | return False |
|
144 | 144 | |
|
145 | 145 | def has_gettext(): |
|
146 | 146 | return matchoutput('msgfmt --version', 'GNU gettext-tools') |
|
147 | 147 | |
|
148 | 148 | def has_git(): |
|
149 | 149 | return matchoutput('git --version 2>&1', r'^git version') |
|
150 | 150 | |
|
151 | 151 | def has_docutils(): |
|
152 | 152 | try: |
|
153 | 153 | from docutils.core import publish_cmdline |
|
154 | 154 | return True |
|
155 | 155 | except ImportError: |
|
156 | 156 | return False |
|
157 | 157 | |
|
158 | 158 | def getsvnversion(): |
|
159 | 159 | m = matchoutput('svn --version --quiet 2>&1', r'^(\d+)\.(\d+)') |
|
160 | 160 | if not m: |
|
161 | 161 | return (0, 0) |
|
162 | 162 | return (int(m.group(1)), int(m.group(2))) |
|
163 | 163 | |
|
164 | 164 | def has_svn15(): |
|
165 | 165 | return getsvnversion() >= (1, 5) |
|
166 | 166 | |
|
167 | 167 | def has_svn13(): |
|
168 | 168 | return getsvnversion() >= (1, 3) |
|
169 | 169 | |
|
170 | 170 | def has_svn(): |
|
171 | 171 | return matchoutput('svn --version 2>&1', r'^svn, version') and \ |
|
172 | 172 | matchoutput('svnadmin --version 2>&1', r'^svnadmin, version') |
|
173 | 173 | |
|
174 | 174 | def has_svn_bindings(): |
|
175 | 175 | try: |
|
176 | 176 | import svn.core |
|
177 | 177 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
178 | 178 | if version < (1, 4): |
|
179 | 179 | return False |
|
180 | 180 | return True |
|
181 | 181 | except ImportError: |
|
182 | 182 | return False |
|
183 | 183 | |
|
184 | 184 | def has_p4(): |
|
185 | 185 | return (matchoutput('p4 -V', r'Rev\. P4/') and |
|
186 | 186 | matchoutput('p4d -V', r'Rev\. P4D/')) |
|
187 | 187 | |
|
188 | 188 | def has_symlink(): |
|
189 | 189 | if getattr(os, "symlink", None) is None: |
|
190 | 190 | return False |
|
191 | 191 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
192 | 192 | try: |
|
193 | 193 | os.symlink(".", name) |
|
194 | 194 | os.unlink(name) |
|
195 | 195 | return True |
|
196 | 196 | except (OSError, AttributeError): |
|
197 | 197 | return False |
|
198 | 198 | |
|
199 | 199 | def has_hardlink(): |
|
200 | 200 | from mercurial import util |
|
201 | 201 | fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) |
|
202 | 202 | os.close(fh) |
|
203 | 203 | name = tempfile.mktemp(dir='.', prefix=tempprefix) |
|
204 | 204 | try: |
|
205 | 205 | try: |
|
206 | 206 | util.oslink(fn, name) |
|
207 | 207 | os.unlink(name) |
|
208 | 208 | return True |
|
209 | 209 | except OSError: |
|
210 | 210 | return False |
|
211 | 211 | finally: |
|
212 | 212 | os.unlink(fn) |
|
213 | 213 | |
|
214 | 214 | def has_tla(): |
|
215 | 215 | return matchoutput('tla --version 2>&1', r'The GNU Arch Revision') |
|
216 | 216 | |
|
217 | 217 | def has_gpg(): |
|
218 | 218 | return matchoutput('gpg --version 2>&1', r'GnuPG') |
|
219 | 219 | |
|
220 | 220 | def has_unix_permissions(): |
|
221 | 221 | d = tempfile.mkdtemp(dir='.', prefix=tempprefix) |
|
222 | 222 | try: |
|
223 | 223 | fname = os.path.join(d, 'foo') |
|
224 | 224 | for umask in (077, 007, 022): |
|
225 | 225 | os.umask(umask) |
|
226 | 226 | f = open(fname, 'w') |
|
227 | 227 | f.close() |
|
228 | 228 | mode = os.stat(fname).st_mode |
|
229 | 229 | os.unlink(fname) |
|
230 | 230 | if mode & 0777 != ~umask & 0666: |
|
231 | 231 | return False |
|
232 | 232 | return True |
|
233 | 233 | finally: |
|
234 | 234 | os.rmdir(d) |
|
235 | 235 | |
|
236 | 236 | def has_pyflakes(): |
|
237 | 237 | return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"", |
|
238 | 238 | r"<stdin>:1: 're' imported but unused", |
|
239 | 239 | True) |
|
240 | 240 | |
|
241 | 241 | def has_pygments(): |
|
242 | 242 | try: |
|
243 | 243 | import pygments |
|
244 | 244 | return True |
|
245 | 245 | except ImportError: |
|
246 | 246 | return False |
|
247 | 247 | |
|
248 | 248 | def has_outer_repo(): |
|
249 | 249 | # failing for other reasons than 'no repo' imply that there is a repo |
|
250 | 250 | return not matchoutput('hg root 2>&1', |
|
251 | 251 | r'abort: no repository found', True) |
|
252 | 252 | |
|
253 | 253 | def has_ssl(): |
|
254 | 254 | try: |
|
255 | 255 | import ssl |
|
256 | 256 | import OpenSSL |
|
257 | 257 | OpenSSL.SSL.Context |
|
258 | 258 | return True |
|
259 | 259 | except ImportError: |
|
260 | 260 | return False |
|
261 | 261 | |
|
262 | 262 | def has_windows(): |
|
263 | 263 | return os.name == 'nt' |
|
264 | 264 | |
|
265 | 265 | def has_system_sh(): |
|
266 | 266 | return os.name != 'nt' |
|
267 | 267 | |
|
268 | 268 | def has_serve(): |
|
269 | 269 | return os.name != 'nt' # gross approximation |
|
270 | 270 | |
|
271 | 271 | def has_tic(): |
|
272 | 272 | return matchoutput('test -x "`which tic`"', '') |
|
273 | 273 | |
|
274 | 274 | def has_msys(): |
|
275 | 275 | return os.getenv('MSYSTEM') |
|
276 | 276 | |
|
277 | 277 | def has_aix(): |
|
278 | 278 | return sys.platform.startswith("aix") |
|
279 | 279 | |
|
280 | 280 | checks = { |
|
281 | 281 | "true": (lambda: True, "yak shaving"), |
|
282 | 282 | "false": (lambda: False, "nail clipper"), |
|
283 | 283 | "baz": (has_baz, "GNU Arch baz client"), |
|
284 | 284 | "bzr": (has_bzr, "Canonical's Bazaar client"), |
|
285 | 285 | "bzr114": (has_bzr114, "Canonical's Bazaar client >= 1.14"), |
|
286 | 286 | "cacheable": (has_cacheable_fs, "cacheable filesystem"), |
|
287 | 287 | "cvs": (has_cvs, "cvs client/server"), |
|
288 | 288 | "cvs112": (has_cvs112, "cvs client/server >= 1.12"), |
|
289 | 289 | "darcs": (has_darcs, "darcs client"), |
|
290 | 290 | "docutils": (has_docutils, "Docutils text processing library"), |
|
291 | 291 | "eol-in-paths": (has_eol_in_paths, "end-of-lines in paths"), |
|
292 | 292 | "execbit": (has_executablebit, "executable bit"), |
|
293 | 293 | "fifo": (has_fifo, "named pipes"), |
|
294 | 294 | "gettext": (has_gettext, "GNU Gettext (msgfmt)"), |
|
295 | 295 | "git": (has_git, "git command line client"), |
|
296 | 296 | "gpg": (has_gpg, "gpg client"), |
|
297 | 297 | "hardlink": (has_hardlink, "hardlinks"), |
|
298 | 298 | "icasefs": (has_icasefs, "case insensitive file system"), |
|
299 | 299 | "inotify": (has_inotify, "inotify extension support"), |
|
300 | 300 | "killdaemons": (has_killdaemons, 'killdaemons.py support'), |
|
301 | 301 | "lsprof": (has_lsprof, "python lsprof module"), |
|
302 | 302 | "mtn": (has_mtn, "monotone client (>= 1.0)"), |
|
303 | 303 | "outer-repo": (has_outer_repo, "outer repo"), |
|
304 | 304 | "p4": (has_p4, "Perforce server and client"), |
|
305 | 305 | "pyflakes": (has_pyflakes, "Pyflakes python linter"), |
|
306 | 306 | "pygments": (has_pygments, "Pygments source highlighting library"), |
|
307 | 307 | "serve": (has_serve, "platform and python can manage 'hg serve -d'"), |
|
308 | 308 | "ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"), |
|
309 | 309 | "svn": (has_svn, "subversion client and admin tools"), |
|
310 | 310 | "svn13": (has_svn13, "subversion client and admin tools >= 1.3"), |
|
311 | 311 | "svn15": (has_svn15, "subversion client and admin tools >= 1.5"), |
|
312 | 312 | "svn-bindings": (has_svn_bindings, "subversion python bindings"), |
|
313 | 313 | "symlink": (has_symlink, "symbolic links"), |
|
314 | 314 | "system-sh": (has_system_sh, "system() uses sh"), |
|
315 | 315 | "tic": (has_tic, "terminfo compiler"), |
|
316 | 316 | "tla": (has_tla, "GNU Arch tla client"), |
|
317 | 317 | "unix-permissions": (has_unix_permissions, "unix-style permissions"), |
|
318 | 318 | "windows": (has_windows, "Windows"), |
|
319 | 319 | "msys": (has_msys, "Windows with MSYS"), |
|
320 | 320 | "aix": (has_aix, "AIX"), |
|
321 | 321 | } |
@@ -1,30 +1,21 b'' | |||
|
1 | 1 | $ "$TESTDIR/hghave" pyflakes || exit 80 |
|
2 | 2 | $ cd "`dirname "$TESTDIR"`" |
|
3 | 3 | |
|
4 | 4 | run pyflakes on all tracked files ending in .py or without a file ending |
|
5 | 5 | (skipping binary file random-seed) |
|
6 | 6 | $ hg manifest 2>/dev/null | egrep "\.py$|^[^.]*$" | grep -v /random_seed$ \ |
|
7 | 7 | > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py" |
|
8 | contrib/simplemerge:*: 'os' imported but unused (glob) | |
|
9 | 8 | contrib/win32/hgwebdir_wsgi.py:*: 'win32traceutil' imported but unused (glob) |
|
10 | 9 | setup.py:*: 'sha' imported but unused (glob) |
|
11 | 10 | setup.py:*: 'zlib' imported but unused (glob) |
|
12 | 11 | setup.py:*: 'bz2' imported but unused (glob) |
|
13 | 12 | setup.py:*: 'py2exe' imported but unused (glob) |
|
14 | 13 | tests/hghave.py:*: 'hgext' imported but unused (glob) |
|
15 | 14 | tests/hghave.py:*: '_lsprof' imported but unused (glob) |
|
16 | 15 | tests/hghave.py:*: 'publish_cmdline' imported but unused (glob) |
|
17 | 16 | tests/hghave.py:*: 'pygments' imported but unused (glob) |
|
18 | 17 | tests/hghave.py:*: 'ssl' imported but unused (glob) |
|
19 | contrib/casesmash.py:*: local variable 'inst' is assigned to but never used (glob) | |
|
20 | contrib/check-code.py:*: local variable 'po' is assigned to but never used (glob) | |
|
21 | contrib/hgfixes/fix_leftover_imports.py:*: local variable 'bare_names' is assigned to but never used (glob) | |
|
22 | contrib/perf.py:*: local variable 'm' is assigned to but never used (glob) | |
|
23 | contrib/perf.py:*: local variable 'c' is assigned to but never used (glob) | |
|
24 | doc/hgmanpage.py:*: local variable 'backref_text' is assigned to but never used (glob) | |
|
25 | tests/hghave.py:*: local variable 'err' is assigned to but never used (glob) | |
|
26 | tests/test-hgweb-auth.py:*: local variable 'e' is assigned to but never used (glob) | |
|
27 | 18 | contrib/win32/hgwebdir_wsgi.py:*: 'from isapi.install import *' used; unable to detect undefined names (glob) |
|
28 | 19 | hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob) |
|
29 | 20 | |
|
30 | 21 |
@@ -1,107 +1,107 b'' | |||
|
1 | 1 | from mercurial import demandimport; demandimport.enable() |
|
2 | 2 | import urllib2 |
|
3 | 3 | from mercurial import ui, util |
|
4 | 4 | from mercurial import url |
|
5 | 5 | from mercurial.error import Abort |
|
6 | 6 | |
|
7 | 7 | class myui(ui.ui): |
|
8 | 8 | def interactive(self): |
|
9 | 9 | return False |
|
10 | 10 | |
|
11 | 11 | origui = myui() |
|
12 | 12 | |
|
13 | 13 | def writeauth(items): |
|
14 | 14 | ui = origui.copy() |
|
15 | 15 | for name, value in items.iteritems(): |
|
16 | 16 | ui.setconfig('auth', name, value) |
|
17 | 17 | return ui |
|
18 | 18 | |
|
19 | 19 | def dumpdict(dict): |
|
20 | 20 | return '{' + ', '.join(['%s: %s' % (k, dict[k]) |
|
21 | 21 | for k in sorted(dict.iterkeys())]) + '}' |
|
22 | 22 | |
|
23 | 23 | def test(auth, urls=None): |
|
24 | 24 | print 'CFG:', dumpdict(auth) |
|
25 | 25 | prefixes = set() |
|
26 | 26 | for k in auth: |
|
27 | 27 | prefixes.add(k.split('.', 1)[0]) |
|
28 | 28 | for p in prefixes: |
|
29 | 29 | for name in ('.username', '.password'): |
|
30 | 30 | if (p + name) not in auth: |
|
31 | 31 | auth[p + name] = p |
|
32 | 32 | auth = dict((k, v) for k, v in auth.iteritems() if v is not None) |
|
33 | 33 | |
|
34 | 34 | ui = writeauth(auth) |
|
35 | 35 | |
|
36 | 36 | def _test(uri): |
|
37 | 37 | print 'URI:', uri |
|
38 | 38 | try: |
|
39 | 39 | pm = url.passwordmgr(ui) |
|
40 | 40 | u, authinfo = util.url(uri).authinfo() |
|
41 | 41 | if authinfo is not None: |
|
42 | 42 | pm.add_password(*authinfo) |
|
43 | 43 | print ' ', pm.find_user_password('test', u) |
|
44 |
except Abort |
|
|
44 | except Abort: | |
|
45 | 45 | print 'abort' |
|
46 | 46 | |
|
47 | 47 | if not urls: |
|
48 | 48 | urls = [ |
|
49 | 49 | 'http://example.org/foo', |
|
50 | 50 | 'http://example.org/foo/bar', |
|
51 | 51 | 'http://example.org/bar', |
|
52 | 52 | 'https://example.org/foo', |
|
53 | 53 | 'https://example.org/foo/bar', |
|
54 | 54 | 'https://example.org/bar', |
|
55 | 55 | 'https://x@example.org/bar', |
|
56 | 56 | 'https://y@example.org/bar', |
|
57 | 57 | ] |
|
58 | 58 | for u in urls: |
|
59 | 59 | _test(u) |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | print '\n*** Test in-uri schemes\n' |
|
63 | 63 | test({'x.prefix': 'http://example.org'}) |
|
64 | 64 | test({'x.prefix': 'https://example.org'}) |
|
65 | 65 | test({'x.prefix': 'http://example.org', 'x.schemes': 'https'}) |
|
66 | 66 | test({'x.prefix': 'https://example.org', 'x.schemes': 'http'}) |
|
67 | 67 | |
|
68 | 68 | print '\n*** Test separately configured schemes\n' |
|
69 | 69 | test({'x.prefix': 'example.org', 'x.schemes': 'http'}) |
|
70 | 70 | test({'x.prefix': 'example.org', 'x.schemes': 'https'}) |
|
71 | 71 | test({'x.prefix': 'example.org', 'x.schemes': 'http https'}) |
|
72 | 72 | |
|
73 | 73 | print '\n*** Test prefix matching\n' |
|
74 | 74 | test({'x.prefix': 'http://example.org/foo', |
|
75 | 75 | 'y.prefix': 'http://example.org/bar'}) |
|
76 | 76 | test({'x.prefix': 'http://example.org/foo', |
|
77 | 77 | 'y.prefix': 'http://example.org/foo/bar'}) |
|
78 | 78 | test({'x.prefix': '*', 'y.prefix': 'https://example.org/bar'}) |
|
79 | 79 | |
|
80 | 80 | print '\n*** Test user matching\n' |
|
81 | 81 | test({'x.prefix': 'http://example.org/foo', |
|
82 | 82 | 'x.username': None, |
|
83 | 83 | 'x.password': 'xpassword'}, |
|
84 | 84 | urls=['http://y@example.org/foo']) |
|
85 | 85 | test({'x.prefix': 'http://example.org/foo', |
|
86 | 86 | 'x.username': None, |
|
87 | 87 | 'x.password': 'xpassword', |
|
88 | 88 | 'y.prefix': 'http://example.org/foo', |
|
89 | 89 | 'y.username': 'y', |
|
90 | 90 | 'y.password': 'ypassword'}, |
|
91 | 91 | urls=['http://y@example.org/foo']) |
|
92 | 92 | test({'x.prefix': 'http://example.org/foo/bar', |
|
93 | 93 | 'x.username': None, |
|
94 | 94 | 'x.password': 'xpassword', |
|
95 | 95 | 'y.prefix': 'http://example.org/foo', |
|
96 | 96 | 'y.username': 'y', |
|
97 | 97 | 'y.password': 'ypassword'}, |
|
98 | 98 | urls=['http://y@example.org/foo/bar']) |
|
99 | 99 | |
|
100 | 100 | def testauthinfo(fullurl, authurl): |
|
101 | 101 | print 'URIs:', fullurl, authurl |
|
102 | 102 | pm = urllib2.HTTPPasswordMgrWithDefaultRealm() |
|
103 | 103 | pm.add_password(*util.url(fullurl).authinfo()[1]) |
|
104 | 104 | print pm.find_user_password('test', authurl) |
|
105 | 105 | |
|
106 | 106 | print '\n*** Test urllib2 and util.url\n' |
|
107 | 107 | testauthinfo('http://user@example.com:8080/foo', 'http://example.com:8080/foo') |
General Comments 0
You need to be logged in to leave comments.
Login now