##// END OF EJS Templates
py3: conditionalize httplib import...
Pulkit Goyal
r29455:0c741fd6 default
parent child Browse files
Show More
@@ -1,649 +1,650 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # check-code - a style and portability checker for Mercurial
4 4 #
5 5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """style and portability checker for Mercurial
11 11
12 12 when a rule triggers wrong, do one of the following (prefer one from top):
13 13 * do the work-around the rule suggests
14 14 * doublecheck that it is a false match
15 15 * improve the rule pattern
16 16 * add an ignore pattern to the rule (3rd arg) which matches your good line
17 17 (you can append a short comment and match this, like: #re-raises)
18 18 * change the pattern to a warning and list the exception in test-check-code-hg
19 19 * ONLY use no--check-code for skipping entire files from external sources
20 20 """
21 21
22 22 from __future__ import absolute_import, print_function
23 23 import glob
24 24 import keyword
25 25 import optparse
26 26 import os
27 27 import re
28 28 import sys
# py2/py3 compatibility shims.
# On py2 the built-in open() already yields byte strings, which is what the
# checker wants; on py3 we force an ascii text-mode opener so that any
# non-ascii byte raises UnicodeDecodeError and is reported by checkfile().
if sys.version_info[0] < 3:
    opentext = open
else:
    def opentext(f):
        # ascii is deliberate: Mercurial sources must stay 7-bit clean
        return open(f, encoding='ascii')
# py3 removed xrange; alias it so the rest of the file can use one name
try:
    xrange
except NameError:
    xrange = range
# re2 is an optional, faster regex engine; fall back silently when absent
try:
    import re2
except ImportError:
    re2 = None
42 42
def compilere(pat, multiline=False):
    """Compile *pat*, preferring the re2 engine when available.

    When *multiline* is true the pattern is compiled in multi-line mode.
    Falls back to the stdlib ``re`` module if re2 is missing or rejects
    the pattern (re2 supports a smaller regex dialect).
    """
    source = '(?m)' + pat if multiline else pat
    if re2:
        try:
            return re2.compile(source)
        except re2.error:
            pass
    return re.compile(source)
52 52
53 53 # check "rules depending on implementation of repquote()" in each
54 54 # patterns (especially pypats), before changing around repquote()
# check "rules depending on implementation of repquote()" in each
# patterns (especially pypats), before changing around repquote()
# Characters whose identity matters to downstream rules keep a fixed code.
_repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
                     '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}

def _repquoteencodechr(i):
    """Return the single char repquote() substitutes for code point *i*."""
    if i > 255:
        return 'u'              # outside latin-1: opaque "unicode" marker
    ch = chr(i)
    fixed = _repquotefixedmap.get(ch)
    if fixed is not None:
        return fixed
    if ch.isalpha():
        return 'x'
    if ch.isdigit():
        return 'n'
    return 'o'

# 256-entry translation table used by repquote() to blank string contents
_repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
69 69
def repquote(m):
    """Neutralize a quoted-string match, keeping quotes and layout chars."""
    quote = m.group('quote')
    blanked = m.group('text').translate(_repquotett)
    return quote + blanked + quote
74 74
def reppython(m):
    """Neutralize python source for checking: blank comments and strings."""
    comment = m.group('comment')
    if not comment:
        # no comment group: the match was a quoted string
        return repquote(m)
    body = len(comment.rstrip())
    # keep trailing whitespace visible so the trailing-whitespace rule fires
    return '#' * body + comment[body:]
81 81
def repcomment(m):
    """Blank a comment body, preserving its indentation and length."""
    indent, body = m.group(1), m.group(2)
    return indent + '#' * len(body)
84 84
def repccomment(m):
    """Blank a C comment, keeping newlines and post-newline indent."""
    body = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return m.group(1) + body + "*/"
88 88
def repcallspaces(m):
    """Collapse indentation after line continuations inside a call."""
    tail = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + tail
92 92
def repinclude(m):
    """Replace the target of an #include with the placeholder <foo>."""
    prefix = m.group(1)
    return prefix + "<foo>"
95 95
def rephere(m):
    """Blank the body of a here-document, keeping whitespace layout."""
    blanked = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + blanked
99 99
100 100
101 101 testpats = [
102 102 [
103 103 (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
104 104 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
105 105 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
106 106 (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
107 107 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
108 108 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
109 109 (r'echo -n', "don't use 'echo -n', use printf"),
110 110 (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
111 111 (r'head -c', "don't use 'head -c', use 'dd'"),
112 112 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
113 113 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
114 114 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
115 115 (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
116 116 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
117 117 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
118 118 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
119 119 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
120 120 "use egrep for extended grep syntax"),
121 121 (r'/bin/', "don't use explicit paths for tools"),
122 122 (r'[^\n]\Z', "no trailing newline"),
123 123 (r'export .*=', "don't export and assign at once"),
124 124 (r'^source\b', "don't use 'source', use '.'"),
125 125 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
126 126 (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
127 127 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
128 128 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
129 129 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
130 130 (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
131 131 (r'^alias\b.*=', "don't use alias, use a function"),
132 132 (r'if\s*!', "don't use '!' to negate exit status"),
133 133 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
134 134 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
135 135 (r'^( *)\t', "don't use tabs to indent"),
136 136 (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
137 137 "put a backslash-escaped newline after sed 'i' command"),
138 138 (r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
139 139 (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
140 140 (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
141 141 (r'\butil\.Abort\b', "directly use error.Abort"),
142 142 (r'\|&', "don't use |&, use 2>&1"),
143 143 (r'\w = +\w', "only one space after = allowed"),
144 144 (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
145 145 (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
146 146 ],
147 147 # warnings
148 148 [
149 149 (r'^function', "don't use 'function', use old style"),
150 150 (r'^diff.*-\w*N', "don't use 'diff -N'"),
151 151 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
152 152 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
153 153 (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
154 154 ]
155 155 ]
156 156
157 157 testfilters = [
158 158 (r"( *)(#([^\n]*\S)?)", repcomment),
159 159 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
160 160 ]
161 161
162 162 winglobmsg = "use (glob) to match Windows paths too"
163 163 uprefix = r"^ \$ "
164 164 utestpats = [
165 165 [
166 166 (r'^(\S.*|| [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
167 167 (uprefix + r'.*\|\s*sed[^|>\n]*\n',
168 168 "use regex test output patterns instead of sed"),
169 169 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
170 170 (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
171 171 (uprefix + r'.*\|\| echo.*(fail|error)',
172 172 "explicit exit code checks unnecessary"),
173 173 (uprefix + r'set -e', "don't use set -e"),
174 174 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
175 175 (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
176 176 "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
177 177 '# no-msys'), # in test-pull.t which is skipped on windows
178 178 (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
179 179 (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
180 180 winglobmsg),
181 181 (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
182 182 '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
183 183 (r'^ reverting (?!subrepo ).*/.*[^)]$', winglobmsg),
184 184 (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
185 185 (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
186 186 (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
187 187 (r'^ moving \S+/.*[^)]$', winglobmsg),
188 188 (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
189 189 (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
190 190 (r'^ .*file://\$TESTTMP',
191 191 'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
192 192 (r'^ (cat|find): .*: No such file or directory',
193 193 'use test -f to test for file existence'),
194 194 (r'^ diff -[^ -]*p',
195 195 "don't use (external) diff with -p for portability"),
196 196 (r'^ [-+][-+][-+] .* [-+]0000 \(glob\)',
197 197 "glob timezone field in diff output for portability"),
198 198 (r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
199 199 "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"),
200 200 (r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
201 201 "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"),
202 202 (r'^ @@ -[0-9]+ [+][0-9]+ @@',
203 203 "use '@@ -N* +N* @@ (glob)' style chunk header for portability"),
204 204 (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
205 205 r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
206 206 "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"),
207 207 ],
208 208 # warnings
209 209 [
210 210 (r'^ [^*?/\n]* \(glob\)$',
211 211 "glob match with no glob character (?*/)"),
212 212 ]
213 213 ]
214 214
215 215 for i in [0, 1]:
216 216 for tp in testpats[i]:
217 217 p = tp[0]
218 218 m = tp[1]
219 219 if p.startswith(r'^'):
220 220 p = r"^ [$>] (%s)" % p[1:]
221 221 else:
222 222 p = r"^ [$>] .*(%s)" % p
223 223 utestpats[i].append((p, m) + tp[2:])
224 224
225 225 utestfilters = [
226 226 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
227 227 (r"( *)(#([^\n]*\S)?)", repcomment),
228 228 ]
229 229
230 230 pypats = [
231 231 [
232 232 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
233 233 "tuple parameter unpacking not available in Python 3+"),
234 234 (r'lambda\s*\(.*,.*\)',
235 235 "tuple parameter unpacking not available in Python 3+"),
236 236 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
237 237 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
238 238 (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
239 239 'dict-from-generator'),
240 240 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
241 241 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
242 242 (r'^\s*\t', "don't use tabs"),
243 243 (r'\S;\s*\n', "semicolon"),
244 244 (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
245 245 (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
246 246 (r'(\w|\)),\w', "missing whitespace after ,"),
247 247 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
248 248 (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
249 249 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
250 250 (r'.{81}', "line too long"),
251 251 (r'[^\n]\Z', "no trailing newline"),
252 252 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
253 253 # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
254 254 # "don't use underbars in identifiers"),
255 255 (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
256 256 "don't use camelcase in identifiers"),
257 257 (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
258 258 "linebreak after :"),
259 259 (r'class\s[^( \n]+:', "old-style class, use class foo(object)",
260 260 r'#.*old-style'),
261 261 (r'class\s[^( \n]+\(\):',
262 262 "class foo() creates old style object, use class foo(object)",
263 263 r'#.*old-style'),
264 264 (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist
265 265 if k not in ('print', 'exec')),
266 266 "Python keyword is not a function"),
267 267 (r',]', "unneeded trailing ',' in list"),
268 268 # (r'class\s[A-Z][^\(]*\((?!Exception)',
269 269 # "don't capitalize non-exception classes"),
270 270 # (r'in range\(', "use xrange"),
271 271 # (r'^\s*print\s+', "avoid using print in core and extensions"),
272 272 (r'[\x80-\xff]', "non-ASCII character literal"),
273 273 (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
274 274 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
275 275 "gratuitous whitespace after Python keyword"),
276 276 (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
277 277 # (r'\s\s=', "gratuitous whitespace before ="),
278 278 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
279 279 "missing whitespace around operator"),
280 280 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
281 281 "missing whitespace around operator"),
282 282 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
283 283 "missing whitespace around operator"),
284 284 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
285 285 "wrong whitespace around ="),
286 286 (r'\([^()]*( =[^=]|[^<>!=]= )',
287 287 "no whitespace around = for named parameters"),
288 288 (r'raise Exception', "don't raise generic exceptions"),
289 289 (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
290 290 "don't use old-style two-argument raise, use Exception(message)"),
291 291 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
292 292 (r' [=!]=\s+(True|False|None)',
293 293 "comparison with singleton, use 'is' or 'is not' instead"),
294 294 (r'^\s*(while|if) [01]:',
295 295 "use True/False for constant Boolean expression"),
296 296 (r'(?:(?<!def)\s+|\()hasattr',
297 297 'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
298 298 (r'opener\([^)]*\).read\(',
299 299 "use opener.read() instead"),
300 300 (r'opener\([^)]*\).write\(',
301 301 "use opener.write() instead"),
302 302 (r'[\s\(](open|file)\([^)]*\)\.read\(',
303 303 "use util.readfile() instead"),
304 304 (r'[\s\(](open|file)\([^)]*\)\.write\(',
305 305 "use util.writefile() instead"),
306 306 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
307 307 "always assign an opened file to a variable, and close it afterwards"),
308 308 (r'[\s\(](open|file)\([^)]*\)\.',
309 309 "always assign an opened file to a variable, and close it afterwards"),
310 310 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
311 311 (r'\.debug\(\_', "don't mark debug messages for translation"),
312 312 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
313 313 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
314 314 (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
315 315 'legacy exception syntax; use "as" instead of ","'),
316 316 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
317 317 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
318 318 (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
319 319 (r'os\.path\.join\(.*, *(""|\'\')\)',
320 320 "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
321 321 (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
322 322 # XXX only catch mutable arguments on the first line of the definition
323 323 (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
324 324 (r'\butil\.Abort\b', "directly use error.Abort"),
325 325 (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
326 326 (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
327 327 (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
328 328 (r'^import SocketServer', "don't use SockerServer, use util.socketserver"),
329 329 (r'^import urlparse', "don't use urlparse, use util.urlparse"),
330 330 (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
331 331 (r'^import cPickle', "don't use cPickle, use util.pickle"),
332 332 (r'^import pickle', "don't use pickle, use util.pickle"),
333 (r'^import httplib', "don't use httplib, use util.httplib"),
333 334 (r'\.next\(\)', "don't use .next(), use next(...)"),
334 335
335 336 # rules depending on implementation of repquote()
336 337 (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
337 338 'string join across lines with no space'),
338 339 (r'''(?x)ui\.(status|progress|write|note|warn)\(
339 340 [ \t\n#]*
340 341 (?# any strings/comments might precede a string, which
341 342 # contains translatable message)
342 343 ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
343 344 (?# sequence consisting of below might precede translatable message
344 345 # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
345 346 # - escaped character: "\\", "\n", "\0" ...
346 347 # - character other than '%', 'b' as '\', and 'x' as alphabet)
347 348 (['"]|\'\'\'|""")
348 349 ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
349 350 (?# this regexp can't use [^...] style,
350 351 # because _preparepats forcibly adds "\n" into [^...],
351 352 # even though this regexp wants match it against "\n")''',
352 353 "missing _() in ui message (use () to hide false-positives)"),
353 354 ],
354 355 # warnings
355 356 [
356 357 # rules depending on implementation of repquote()
357 358 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
358 359 ]
359 360 ]
360 361
361 362 pyfilters = [
362 363 (r"""(?msx)(?P<comment>\#.*?$)|
363 364 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
364 365 (?P<text>(([^\\]|\\.)*?))
365 366 (?P=quote))""", reppython),
366 367 ]
367 368
368 369 txtfilters = []
369 370
370 371 txtpats = [
371 372 [
372 373 ('\s$', 'trailing whitespace'),
373 374 ('.. note::[ \n][^\n]', 'add two newlines after note::')
374 375 ],
375 376 []
376 377 ]
377 378
378 379 cpats = [
379 380 [
380 381 (r'//', "don't use //-style comments"),
381 382 (r'^ ', "don't use spaces to indent"),
382 383 (r'\S\t', "don't use tabs except for indent"),
383 384 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
384 385 (r'.{81}', "line too long"),
385 386 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
386 387 (r'return\(', "return is not a function"),
387 388 (r' ;', "no space before ;"),
388 389 (r'[^;] \)', "no space before )"),
389 390 (r'[)][{]', "space between ) and {"),
390 391 (r'\w+\* \w+', "use int *foo, not int* foo"),
391 392 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
392 393 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
393 394 (r'\w,\w', "missing whitespace after ,"),
394 395 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
395 396 (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
396 397 (r'^#\s+\w', "use #foo, not # foo"),
397 398 (r'[^\n]\Z', "no trailing newline"),
398 399 (r'^\s*#import\b', "use only #include in standard C code"),
399 400 (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
400 401 (r'strcat\(', "don't use strcat"),
401 402
402 403 # rules depending on implementation of repquote()
403 404 ],
404 405 # warnings
405 406 [
406 407 # rules depending on implementation of repquote()
407 408 ]
408 409 ]
409 410
410 411 cfilters = [
411 412 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
412 413 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
413 414 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
414 415 (r'(\()([^)]+\))', repcallspaces),
415 416 ]
416 417
417 418 inutilpats = [
418 419 [
419 420 (r'\bui\.', "don't use ui in util"),
420 421 ],
421 422 # warnings
422 423 []
423 424 ]
424 425
425 426 inrevlogpats = [
426 427 [
427 428 (r'\brepo\.', "don't use repo in revlog"),
428 429 ],
429 430 # warnings
430 431 []
431 432 ]
432 433
433 434 webtemplatefilters = []
434 435
435 436 webtemplatepats = [
436 437 [],
437 438 [
438 439 (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
439 440 'follow desc keyword with either firstline or websub'),
440 441 ]
441 442 ]
442 443
443 444 checks = [
444 445 ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
445 446 ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
446 447 ('c', r'.*\.[ch]$', '', cfilters, cpats),
447 448 ('unified test', r'.*\.t$', '', utestfilters, utestpats),
448 449 ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
449 450 pyfilters, inrevlogpats),
450 451 ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
451 452 inutilpats),
452 453 ('txt', r'.*\.txt$', '', txtfilters, txtpats),
453 454 ('web template', r'mercurial/templates/.*\.tmpl', '',
454 455 webtemplatefilters, webtemplatepats),
455 456 ]
456 457
def _preparepats():
    """Compile every check's patterns and filters, in place, for
    whole-file (multi-line) searching.

    Two rewrites make line-oriented patterns safe against a full file:
    ``\\s`` is narrowed to space/tab so it cannot cross a newline, and a
    ``\\n`` is injected into every negated character class so ``[^...]``
    cannot swallow one either.
    """
    for check in checks:
        failandwarn = check[-1]
        for pats in failandwarn:
            for i, pseq in enumerate(pats):
                pat = pseq[0]
                # \s would otherwise match \n in MULTILINE mode
                pat = re.sub(r'(?<!\\)\\s', r'[ \\t]', pat)
                # keep negated classes from running past end of line
                pat = re.sub(r'(?<!\\)\[\^', r'[^\\n', pat)
                pats[i] = (re.compile(pat, re.MULTILINE),) + pseq[1:]
        filters = check[3]
        for i, flt in enumerate(filters):
            filters[i] = re.compile(flt[0]), flt[1]
_preparepats()
474 475
class norepeatlogger(object):
    """Error reporter that echoes each offending line only once.

    Consecutive reports about the same (fname, lineno, line) triple share
    a single header, so several rule hits on one line stay readable.
    """
    def __init__(self):
        self._lastseen = None

    def log(self, fname, lineno, line, msg, blame):
        """print error related a to given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        """
        msgid = (fname, lineno, line)
        if msgid != self._lastseen:
            if blame:
                header = "%s:%d (%s):" % (fname, lineno, blame)
            else:
                header = "%s:%d:" % (fname, lineno)
            print(header)
            print(" > %s" % line)
            self._lastseen = msgid
        print(" " + msg)
499 500
500 501 _defaultlogger = norepeatlogger()
501 502
def getblame(f):
    """Return [(line, user, rev), ...] for *f* via ``hg annotate -un``."""
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        meta, content = raw.split(':', 1)
        user, rev = meta.split()
        # strip the single space after ':' and the trailing newline
        annotated.append((content[1:-1], user, rev))
    return annotated
509 510
def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False, lineno=True):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
       logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of error to display before aborting.
       Set to false (default) to report all errors

    return True if no error is found, False otherwise.
    """
    blamecache = None
    result = True

    try:
        with opentext(f) as fp:
            try:
                # pre keeps the original text for reporting; post is
                # progressively rewritten by the per-language filters
                pre = post = fp.read()
            except UnicodeDecodeError as e:
                print("%s while reading %s" % (e, f))
                return result
    except IOError as e:
        print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
        return result

    for name, match, magic, filters, pats in checks:
        if debug:
            print(name, f)
        fc = 0
        # a check applies when the filename matches its regex, or the
        # file content carries the check's magic marker (e.g. a shebang)
        if not (re.match(match, f) or (magic and re.search(magic, pre))):
            if debug:
                print("Skipping %s for %s it doesn't match %s" % (
                       name, match, f))
            continue
        if "no-" "check-code" in pre:
            # If you're looking at this line, it's because a file has:
            # no- check- code
            # but the reason to output skipping is to make life for
            # tests easier. So, instead of writing it with a normal
            # spelling, we write it with the expected spelling from
            # tests/test-check-code.t
            print("Skipping %s it has no-che?k-code (glob)" % f)
            return "Skip" # skip checking this file
        # neutralize comments/strings/etc. before pattern matching
        for p, r in filters:
            post = re.sub(p, r, post)
        nerrs = len(pats[0]) # nerr elements are errors
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version

        if debug:
            print("Checking %s for %s" % (name, f))

        prelines = None
        errors = []
        for i, pat in enumerate(pats):
            if len(pat) == 3:
                p, msg, ignore = pat
            else:
                p, msg = pat
                ignore = None
            # entries past nerrs come from the warning list
            if i >= nerrs:
                msg = "warning: " + msg

            pos = 0
            n = 0
            for m in p.finditer(post):
                # lazily split the file only when there is a first hit
                if prelines is None:
                    prelines = pre.splitlines()
                    postlines = post.splitlines(True)

                # translate the match's character offset into a line
                # index by walking the filtered lines; n/pos carry over
                # between matches since finditer yields them in order
                start = m.start()
                while n < len(postlines):
                    step = len(postlines[n])
                    if pos + step > start:
                        break
                    pos += step
                    n += 1
                l = prelines[n]

                if ignore and re.search(ignore, l, re.MULTILINE):
                    if debug:
                        print("Skipping %s for %s:%s (ignore pattern)" % (
                            name, f, n))
                    continue
                bd = ""
                if blame:
                    bd = 'working directory'
                    if not blamecache:
                        blamecache = getblame(f)
                    if n < len(blamecache):
                        bl, bu, br = blamecache[n]
                        # annotate output matches this line: attribute it
                        if bl == l:
                            bd = '%s@%s' % (bu, br)

                # lineno=False reports False instead of a line number
                errors.append((f, lineno and n + 1, l, msg, bd))
                result = False

        errors.sort()
        for e in errors:
            logfunc(*e)
            fc += 1
            if maxerr and fc >= maxerr:
                print(" (too many errors, giving up)")
                break

    return result
620 621
if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")
    parser.add_option("", "--nolineno", action="store_false",
                      dest='lineno', help="don't show line numbers")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
                        lineno=True)
    (options, args) = parser.parse_args()

    # with no arguments, check every file in the current directory
    if len(args) == 0:
        check = glob.glob("*")
    else:
        check = args

    # exit non-zero if any checked file reported an error
    ret = 0
    for f in check:
        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                         blame=options.blame, debug=options.debug,
                         lineno=options.lineno):
            ret = 1
    sys.exit(ret)
@@ -1,308 +1,308 b''
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 import httplib
13 12 import os
14 13 import socket
15 14 import tempfile
16 15 import zlib
17 16
18 17 from .i18n import _
19 18 from .node import nullid
20 19 from . import (
21 20 bundle2,
22 21 error,
23 22 httpconnection,
24 23 statichttprepo,
25 24 url,
26 25 util,
27 26 wireproto,
28 27 )
29 28
29 httplib = util.httplib
30 30 urlerr = util.urlerr
31 31 urlreq = util.urlreq
32 32
def zgenerator(f):
    """Yield the zlib-decompressed content of file-like object *f*.

    Raises IOError when the HTTP connection ends before the stream does.
    """
    decomp = zlib.decompressobj()
    try:
        for block in util.filechunkiter(f):
            # decompress in bounded (256KiB) slices so memory stays flat
            while block:
                yield decomp.decompress(block, 2**18)
                block = decomp.unconsumed_tail
    except httplib.HTTPException:
        raise IOError(None, _('connection ended unexpectedly'))
    yield decomp.flush()
43 43
44 44 class httppeer(wireproto.wirepeer):
45 45 def __init__(self, ui, path):
46 46 self.path = path
47 47 self.caps = None
48 48 self.handler = None
49 49 self.urlopener = None
50 50 self.requestbuilder = None
51 51 u = util.url(path)
52 52 if u.query or u.fragment:
53 53 raise error.Abort(_('unsupported URL component: "%s"') %
54 54 (u.query or u.fragment))
55 55
56 56 # urllib cannot handle URLs with embedded user or passwd
57 57 self._url, authinfo = u.authinfo()
58 58
59 59 self.ui = ui
60 60 self.ui.debug('using %s\n' % self._url)
61 61
62 62 self.urlopener = url.opener(ui, authinfo)
63 63 self.requestbuilder = urlreq.request
64 64
65 65 def __del__(self):
66 66 if self.urlopener:
67 67 for h in self.urlopener.handlers:
68 68 h.close()
69 69 getattr(h, "close_all", lambda : None)()
70 70
71 71 def url(self):
72 72 return self.path
73 73
74 74 # look up capabilities only when needed
75 75
76 76 def _fetchcaps(self):
77 77 self.caps = set(self._call('capabilities').split())
78 78
79 79 def _capabilities(self):
80 80 if self.caps is None:
81 81 try:
82 82 self._fetchcaps()
83 83 except error.RepoError:
84 84 self.caps = set()
85 85 self.ui.debug('capabilities: %s\n' %
86 86 (' '.join(self.caps or ['none'])))
87 87 return self.caps
88 88
89 89 def lock(self):
90 90 raise error.Abort(_('operation not supported over http'))
91 91
92 92 def _callstream(self, cmd, **args):
93 93 if cmd == 'pushkey':
94 94 args['data'] = ''
95 95 data = args.pop('data', None)
96 96 headers = args.pop('headers', {})
97 97
98 98 self.ui.debug("sending %s command\n" % cmd)
99 99 q = [('cmd', cmd)]
100 100 headersize = 0
101 101 # Important: don't use self.capable() here or else you end up
102 102 # with infinite recursion when trying to look up capabilities
103 103 # for the first time.
104 104 postargsok = self.caps is not None and 'httppostargs' in self.caps
105 105 # TODO: support for httppostargs when data is a file-like
106 106 # object rather than a basestring
107 107 canmungedata = not data or isinstance(data, basestring)
108 108 if postargsok and canmungedata:
109 109 strargs = urlreq.urlencode(sorted(args.items()))
110 110 if strargs:
111 111 if not data:
112 112 data = strargs
113 113 elif isinstance(data, basestring):
114 114 data = strargs + data
115 115 headers['X-HgArgs-Post'] = len(strargs)
116 116 else:
117 117 if len(args) > 0:
118 118 httpheader = self.capable('httpheader')
119 119 if httpheader:
120 120 headersize = int(httpheader.split(',', 1)[0])
121 121 if headersize > 0:
122 122 # The headers can typically carry more data than the URL.
123 123 encargs = urlreq.urlencode(sorted(args.items()))
124 124 headerfmt = 'X-HgArg-%s'
125 125 contentlen = headersize - len(headerfmt % '000' + ': \r\n')
126 126 headernum = 0
127 127 varyheaders = []
128 128 for i in xrange(0, len(encargs), contentlen):
129 129 headernum += 1
130 130 header = headerfmt % str(headernum)
131 131 headers[header] = encargs[i:i + contentlen]
132 132 varyheaders.append(header)
133 133 headers['Vary'] = ','.join(varyheaders)
134 134 else:
135 135 q += sorted(args.items())
136 136 qs = '?%s' % urlreq.urlencode(q)
137 137 cu = "%s%s" % (self._url, qs)
138 138 size = 0
139 139 if util.safehasattr(data, 'length'):
140 140 size = data.length
141 141 elif data is not None:
142 142 size = len(data)
143 143 if size and self.ui.configbool('ui', 'usehttp2', False):
144 144 headers['Expect'] = '100-Continue'
145 145 headers['X-HgHttp2'] = '1'
146 146 if data is not None and 'Content-Type' not in headers:
147 147 headers['Content-Type'] = 'application/mercurial-0.1'
148 148 req = self.requestbuilder(cu, data, headers)
149 149 if data is not None:
150 150 self.ui.debug("sending %s bytes\n" % size)
151 151 req.add_unredirected_header('Content-Length', '%d' % size)
152 152 try:
153 153 resp = self.urlopener.open(req)
154 154 except urlerr.httperror as inst:
155 155 if inst.code == 401:
156 156 raise error.Abort(_('authorization failed'))
157 157 raise
158 158 except httplib.HTTPException as inst:
159 159 self.ui.debug('http error while sending %s command\n' % cmd)
160 160 self.ui.traceback()
161 161 raise IOError(None, inst)
162 162 except IndexError:
163 163 # this only happens with Python 2.3, later versions raise URLError
164 164 raise error.Abort(_('http error, possibly caused by proxy setting'))
165 165 # record the url we got redirected to
166 166 resp_url = resp.geturl()
167 167 if resp_url.endswith(qs):
168 168 resp_url = resp_url[:-len(qs)]
169 169 if self._url.rstrip('/') != resp_url.rstrip('/'):
170 170 if not self.ui.quiet:
171 171 self.ui.warn(_('real URL is %s\n') % resp_url)
172 172 self._url = resp_url
173 173 try:
174 174 proto = resp.getheader('content-type')
175 175 except AttributeError:
176 176 proto = resp.headers.get('content-type', '')
177 177
178 178 safeurl = util.hidepassword(self._url)
179 179 if proto.startswith('application/hg-error'):
180 180 raise error.OutOfBandError(resp.read())
181 181 # accept old "text/plain" and "application/hg-changegroup" for now
182 182 if not (proto.startswith('application/mercurial-') or
183 183 (proto.startswith('text/plain')
184 184 and not resp.headers.get('content-length')) or
185 185 proto.startswith('application/hg-changegroup')):
186 186 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
187 187 raise error.RepoError(
188 188 _("'%s' does not appear to be an hg repository:\n"
189 189 "---%%<--- (%s)\n%s\n---%%<---\n")
190 190 % (safeurl, proto or 'no content-type', resp.read(1024)))
191 191
192 192 if proto.startswith('application/mercurial-'):
193 193 try:
194 194 version = proto.split('-', 1)[1]
195 195 version_info = tuple([int(n) for n in version.split('.')])
196 196 except ValueError:
197 197 raise error.RepoError(_("'%s' sent a broken Content-Type "
198 198 "header (%s)") % (safeurl, proto))
199 199 if version_info > (0, 1):
200 200 raise error.RepoError(_("'%s' uses newer protocol %s") %
201 201 (safeurl, version))
202 202
203 203 return resp
204 204
205 205 def _call(self, cmd, **args):
206 206 fp = self._callstream(cmd, **args)
207 207 try:
208 208 return fp.read()
209 209 finally:
210 210 # if using keepalive, allow connection to be reused
211 211 fp.close()
212 212
    def _callpush(self, cmd, cg, **args):
        """Push changegroup *cg* to the server via *cmd* (e.g. 'unbundle').

        Returns the server reply split into [status, output]; raises
        error.Abort on connection reset / broken pipe, and
        error.ResponseError if the reply has no newline separator.
        """
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable('unbundle')
        try:
            types = types.split(',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [""]
        # pick the first advertised bundle type we know how to write.
        # NOTE(review): if nothing matches, 'type' stays unbound and the
        # writebundle() call below raises NameError — presumably the
        # capability always contains a known type; verify against callers.
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split('\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_("unexpected response:"), r)
            return vals
        except socket.error as err:
            # translate low-level socket failures into user-facing aborts
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_('push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            # always release the temp bundle, success or failure
            fp.close()
            os.unlink(tempname)
247 247
    def _calltwowaystream(self, cmd, fp, **args):
        """Send the stream *fp* as the body of *cmd* and return the
        server's response stream.

        The input is first spooled to a temporary file (no HTTP/1.1
        chunked transfer available), then posted via _callstream().
        """
        fh = None
        fp_ = None
        filename = None
        try:
            # dump bundle to disk
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
            # copy in 4k chunks so arbitrarily large bundles never live
            # fully in memory
            d = fp.read(4096)
            while d:
                fh.write(d)
                d = fp.read(4096)
            fh.close()
            # start http push
            fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
            headers = {'Content-Type': 'application/mercurial-0.1'}
            return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            # cleanup mirrors setup order; the temp file is only
            # unlinked if it was successfully opened via fdopen
            if fp_ is not None:
                fp_.close()
            if fh is not None:
                fh.close()
                os.unlink(filename)
271 271
272 272 def _callcompressable(self, cmd, **args):
273 273 stream = self._callstream(cmd, **args)
274 274 return util.chunkbuffer(zgenerator(stream))
275 275
276 276 def _abort(self, exception):
277 277 raise exception
278 278
class httpspeer(httppeer):
    # https:// variant of httppeer; identical wire behavior but refuses
    # to start when the Python build lacks SSL support.
    def __init__(self, ui, path):
        if not url.has_https:
            raise error.Abort(_('Python support for SSL and HTTPS '
                                'is not installed'))
        httppeer.__init__(self, ui, path)
285 285
def instance(ui, path, create):
    """Construct a peer for an http:// or https:// *path*.

    Raises error.Abort for create requests (HTTP repos cannot be
    created remotely). If the HTTP handshake fails with RepoError,
    falls back to a static-http peer; if that also fails, the
    original HTTP error is re-raised.
    """
    if create:
        raise error.Abort(_('cannot create new http repository'))
    try:
        if path.startswith('https:'):
            inst = httpspeer(ui, path)
        else:
            inst = httppeer(ui, path)
        try:
            # Try to do useful work when checking compatibility.
            # Usually saves a roundtrip since we want the caps anyway.
            inst._fetchcaps()
        except error.RepoError:
            # No luck, try older compatibility check.
            inst.between([(nullid, nullid)])
        return inst
    except error.RepoError as httpexception:
        try:
            r = statichttprepo.instance(ui, "static-" + path, create)
            ui.note(_('(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception # use the original http RepoError instead
@@ -1,759 +1,759 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, see
13 13 # <http://www.gnu.org/licenses/>.
14 14
15 15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17 17
18 18 # Modified by Benoit Boissinot:
19 19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 20 # Modified by Dirkjan Ochtman:
21 21 # - import md5 function from a local util module
22 22 # Modified by Augie Fackler:
23 23 # - add safesend method and use it to prevent broken pipe errors
24 24 # on large POST requests
25 25
26 26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
27 27
28 28 >>> import urllib2
29 29 >>> from keepalive import HTTPHandler
30 30 >>> keepalive_handler = HTTPHandler()
31 31 >>> opener = urlreq.buildopener(keepalive_handler)
32 32 >>> urlreq.installopener(opener)
33 33 >>>
34 34 >>> fo = urlreq.urlopen('http://www.python.org')
35 35
36 36 If a connection to a given host is requested, and all of the existing
37 37 connections are still in use, another connection will be opened. If
38 38 the handler tries to use an existing connection but it fails in some
39 39 way, it will be closed and removed from the pool.
40 40
41 41 To remove the handler, simply re-run build_opener with no arguments, and
42 42 install that opener.
43 43
44 44 You can explicitly close connections by using the close_connection()
45 45 method of the returned file-like object (described below) or you can
46 46 use the handler methods:
47 47
48 48 close_connection(host)
49 49 close_all()
50 50 open_connections()
51 51
52 52 NOTE: using the close_connection and close_all methods of the handler
53 53 should be done with care when using multiple threads.
54 54 * there is nothing that prevents another thread from creating new
55 55 connections immediately after connections are closed
56 56 * no checks are done to prevent in-use connections from being closed
57 57
58 58 >>> keepalive_handler.close_all()
59 59
60 60 EXTRA ATTRIBUTES AND METHODS
61 61
62 62 Upon a status of 200, the object returned has a few additional
63 63 attributes and methods, which should not be used if you want to
64 64 remain consistent with the normal urllib2-returned objects:
65 65
66 66 close_connection() - close the connection to the host
67 67 readlines() - you know, readlines()
68 68 status - the return status (i.e. 404)
69 69 reason - english translation of status (i.e. 'File not found')
70 70
71 71 If you want the best of both worlds, use this inside an
72 72 AttributeError-catching try:
73 73
74 74 >>> try: status = fo.status
75 75 >>> except AttributeError: status = None
76 76
77 77 Unfortunately, these are ONLY there if status == 200, so it's not
78 78 easy to distinguish between non-200 responses. The reason is that
79 79 urllib2 tries to do clever things with error codes 301, 302, 401,
80 80 and 407, and it wraps the object upon return.
81 81
82 82 For python versions earlier than 2.4, you can avoid this fancy error
83 83 handling by setting the module-level global HANDLE_ERRORS to zero.
84 84 You see, prior to 2.4, it's the HTTP Handler's job to determine what
85 85 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
86 86 means "pass everything up". In python 2.4, however, this job no
87 87 longer belongs to the HTTP Handler and is now done by a NEW handler,
88 88 HTTPErrorProcessor. Here's the bottom line:
89 89
90 90 python version < 2.4
91 91 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
92 92 errors
93 93 HANDLE_ERRORS == 0 pass everything up, error processing is
94 94 left to the calling code
95 95 python version >= 2.4
96 96 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
97 97 HANDLE_ERRORS == 0 (default) pass everything up, let the
98 98 other handlers (specifically,
99 99 HTTPErrorProcessor) decide what to do
100 100
101 101 In practice, setting the variable either way makes little difference
102 102 in python 2.4, so for the most consistent behavior across versions,
103 103 you probably just want to use the defaults, which will give you
104 104 exceptions on errors.
105 105
106 106 """
107 107
108 108 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
109 109
110 110 from __future__ import absolute_import, print_function
111 111
112 112 import errno
113 113 import hashlib
114 import httplib
115 114 import socket
116 115 import sys
117 116 import thread
118 117
119 118 from . import (
120 119 util,
121 120 )
122 121
122 httplib = util.httplib
123 123 urlerr = util.urlerr
124 124 urlreq = util.urlreq
125 125
# logger-like object used for connection tracing; disabled (None) unless
# a caller installs one — test_timeout() below swaps in a FakeLogger
DEBUG = None

# see the module docstring: before python 2.4 the handler itself turned
# non-200 statuses into errors; afterwards HTTPErrorProcessor does it
if sys.version_info < (2, 4):
    HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
131 131
class ConnectionManager(object):
    """Thread-safe registry of keepalive connections.

    Tracks, per host, which connections exist and which are idle
    ("ready") and may therefore be handed out for reuse.
    """
    def __init__(self):
        self._lock = thread.allocate_lock()
        self._hostmap = {} # map hosts to a list of connections
        self._connmap = {} # map connections to host
        self._readymap = {} # map connection to ready state

    def add(self, host, connection, ready):
        """Register *connection* for *host* with the given ready state."""
        # `with` replaces the manual acquire/try/finally/release dance;
        # thread locks have been context managers since python 2.5
        with self._lock:
            self._hostmap.setdefault(host, []).append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready

    def remove(self, connection):
        """Forget *connection*; unknown connections are silently ignored."""
        with self._lock:
            host = self._connmap.pop(connection, None)
            if host is not None:
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                # drop empty per-host lists so get_all() stays tidy
                if not self._hostmap[host]:
                    del self._hostmap[host]

    def set_ready(self, connection, ready):
        """Mark *connection* idle/busy; no-op for unknown connections."""
        # unlocked in the original as well: a single dict store is
        # atomic under the GIL and a stale connection just KeyErrors
        try:
            self._readymap[connection] = ready
        except KeyError:
            pass

    def get_ready_conn(self, host):
        """Return an idle connection for *host*, marking it busy,
        or None if every connection is in use (or host is unknown)."""
        with self._lock:
            for c in self._hostmap.get(host, []):
                if self._readymap[c]:
                    self._readymap[c] = 0
                    return c
        return None

    def get_all(self, host=None):
        """Return a copy: list of connections for *host*, or the whole
        host -> connections mapping when *host* is None."""
        if host:
            return list(self._hostmap.get(host, []))
        else:
            return dict(self._hostmap)
194 194
class KeepAliveHandler(object):
    """urllib-style handler core that reuses HTTP connections.

    Connections are tracked by a ConnectionManager; do_open() tries an
    idle connection for the target host first and only opens a fresh
    one when none can be reused.
    """
    def __init__(self):
        self._cm = ConnectionManager()

    #### Connection Management
    def open_connections(self):
        """return a list of connected hosts and the number of connections
        to each. [('foo.com:80', 2), ('bar.org', 1)]"""
        return [(host, len(li)) for (host, li) in self._cm.get_all().items()]

    def close_connection(self, host):
        """close connection(s) to <host>
        host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
        no error occurs if there is no connection to that host."""
        for h in self._cm.get_all(host):
            self._cm.remove(h)
            h.close()

    def close_all(self):
        """close all open connections"""
        # NOTE: .iteritems() is python 2 only dict API
        for host, conns in self._cm.get_all().iteritems():
            for h in conns:
                self._cm.remove(h)
                h.close()

    def _request_closed(self, request, host, connection):
        """tells us that this request is now closed and that the
        connection is ready for another request"""
        self._cm.set_ready(connection, 1)

    def _remove_connection(self, host, connection, close=0):
        # drop *connection* from the pool, optionally closing the socket
        if close:
            connection.close()
        self._cm.remove(connection)

    #### Transaction Execution
    def http_open(self, req):
        # entry point called by the urllib opener machinery
        return self.do_open(HTTPConnection, req)

    def do_open(self, http_class, req):
        """Serve *req*, reusing an idle pooled connection when possible.

        Returns a response object decorated with handler bookkeeping
        attributes; raises urlerror on missing host or socket/httplib
        failures.
        """
        host = req.get_host()
        if not host:
            raise urlerr.urlerror('no host given')

        try:
            # cycle through idle pooled connections; the while/else runs
            # the else block only when no reusable connection worked
            h = self._cm.get_ready_conn(host)
            while h:
                r = self._reuse_connection(h, req, host)

                # if this response is non-None, then it worked and we're
                # done.  Break out, skipping the else block.
                if r:
                    break

                # connection is bad - possibly closed by server
                # discard it and ask for the next free connection
                h.close()
                self._cm.remove(h)
                h = self._cm.get_ready_conn(host)
            else:
                # no (working) free connections were found.  Create a new one.
                h = http_class(host)
                if DEBUG:
                    DEBUG.info("creating new connection to %s (%d)",
                               host, id(h))
                # registered as busy (ready=0) until the request closes
                self._cm.add(host, h, 0)
                self._start_transaction(h, req)
                r = h.getresponse()
        except (socket.error, httplib.HTTPException) as err:
            raise urlerr.urlerror(err)

        # if not a persistent connection, don't try to reuse it
        if r.will_close:
            self._cm.remove(h)

        if DEBUG:
            DEBUG.info("STATUS: %s, %s", r.status, r.reason)
        # decorate the response so close()/close_connection() can report
        # back to this handler, and mimic the urllib response interface
        r._handler = self
        r._host = host
        r._url = req.get_full_url()
        r._connection = h
        r.code = r.status
        r.headers = r.msg
        r.msg = r.reason

        if r.status == 200 or not HANDLE_ERRORS:
            return r
        else:
            # hand non-200 responses to the opener's error machinery
            return self.parent.error('http', req, r,
                                     r.status, r.msg, r.headers)

    def _reuse_connection(self, h, req, host):
        """start the transaction with a re-used connection
        return a response object (r) upon success or None on failure.
        This DOES not close or remove bad connections in cases where
        it returns.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
        """
        try:
            self._start_transaction(h, req)
            r = h.getresponse()
            # note: just because we got something back doesn't mean it
            # worked.  We'll check the version below, too.
        except (socket.error, httplib.HTTPException):
            r = None
        except: # re-raises
            # adding this block just in case we've missed
            # something we will still raise the exception, but
            # lets try and close the connection and remove it
            # first.  We previously got into a nasty loop
            # where an exception was uncaught, and so the
            # connection stayed open.  On the next try, the
            # same exception was raised, etc.  The trade-off is
            # that it's now possible this call will raise
            # a DIFFERENT exception
            if DEBUG:
                DEBUG.error("unexpected exception - closing "
                            "connection to %s (%d)", host, id(h))
            self._cm.remove(h)
            h.close()
            raise

        if r is None or r.version == 9:
            # httplib falls back to assuming HTTP 0.9 if it gets a
            # bad header back.  This is most likely to happen if
            # the socket has been closed by the server since we
            # last used the connection.
            if DEBUG:
                DEBUG.info("failed to re-use connection to %s (%d)",
                           host, id(h))
            r = None
        else:
            if DEBUG:
                DEBUG.info("re-using connection to %s (%d)", host, id(h))

        return r

    def _start_transaction(self, h, req):
        """Write the request line, headers and body of *req* onto
        connection *h*; raises urlerror on socket failure."""
        # What follows mostly reimplements HTTPConnection.request()
        # except it adds self.parent.addheaders in the mix.
        headers = req.headers.copy()
        if sys.version_info >= (2, 4):
            headers.update(req.unredirected_hdrs)
        headers.update(self.parent.addheaders)
        # lowercase for the duplicate checks below
        headers = dict((n.lower(), v) for n, v in headers.items())
        skipheaders = {}
        for n in ('host', 'accept-encoding'):
            if n in headers:
                # tell putrequest() not to emit its own copy of these
                skipheaders['skip_' + n.replace('-', '_')] = 1
        try:
            if req.has_data():
                data = req.get_data()
                h.putrequest('POST', req.get_selector(), **skipheaders)
                if 'content-type' not in headers:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                if 'content-length' not in headers:
                    h.putheader('Content-length', '%d' % len(data))
            else:
                h.putrequest('GET', req.get_selector(), **skipheaders)
        except socket.error as err:
            raise urlerr.urlerror(err)
        for k, v in headers.items():
            h.putheader(k, v)
        h.endheaders()
        if req.has_data():
            h.send(data)
362 362
class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
    # concrete handler for http:// URLs: all keepalive logic comes from
    # KeepAliveHandler; urlreq.httphandler supplies the protocol the
    # urllib opener machinery expects
    pass
365 365
class HTTPResponse(httplib.HTTPResponse):
    # we need to subclass HTTPResponse in order to
    # 1) add readline() and readlines() methods
    # 2) add close_connection() methods
    # 3) add info() and geturl() methods

    # in order to add readline(), read must be modified to deal with a
    # buffer.  example: readline must read a buffer and then spit back
    # one line at a time.  The only real alternative is to read one
    # BYTE at a time (ick).  Once something has been read, it can't be
    # put back (ok, maybe it can, but that's even uglier than this),
    # so if you THEN do a normal read, you must first take stuff from
    # the buffer.

    # the read method wraps the original to accommodate buffering,
    # although read() never adds to the buffer.
    # Both readline and readlines have been stolen with almost no
    # modification from socket.py


    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        # note: `strict` is accepted for interface compatibility but
        # not forwarded to the base class
        httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        self.fileno = sock.fileno
        self.code = None
        self._rbuf = ''          # readline() lookahead buffer
        self._rbufsize = 8096
        self._handler = None # inserted by the handler later
        self._host = None # (same)
        self._url = None # (same)
        self._connection = None # (same)

    # keep a handle on the unbuffered base-class read
    _raw_read = httplib.HTTPResponse.read

    def close(self):
        # close the response body and, if a handler is attached, return
        # the underlying connection to the keepalive pool
        if self.fp:
            self.fp.close()
            self.fp = None
            if self._handler:
                self._handler._request_closed(self, self._host,
                                              self._connection)

    def close_connection(self):
        # fully discard the underlying connection (no reuse)
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()

    def info(self):
        # urllib compatibility: headers of the response
        return self.headers

    def geturl(self):
        # urllib compatibility: final URL (set by the handler)
        return self._url

    def read(self, amt=None):
        # the _rbuf test is only in this first if for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                amt -= L
            else:
                s = self._rbuf[:amt]
                self._rbuf = self._rbuf[amt:]
                return s

        # serve remaining buffered bytes first, then the socket
        s = self._rbuf + self._raw_read(amt)
        self._rbuf = ''
        return s

    # stolen from Python SVN #68532 to fix issue1088
    def _read_chunked(self, amt):
        chunk_left = self.chunk_left
        value = ''

        # XXX This accumulates chunks by repeated string concatenation,
        # which is not efficient as the number or size of chunks gets big.
        while True:
            if chunk_left is None:
                # parse the next chunk-size line
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronization is
                    # probably lost
                    self.close()
                    raise httplib.IncompleteRead(value)
                if chunk_left == 0:
                    break
            if amt is None:
                value += self._safe_read(chunk_left)
            elif amt < chunk_left:
                value += self._safe_read(amt)
                self.chunk_left = chunk_left - amt
                return value
            elif amt == chunk_left:
                value += self._safe_read(amt)
                self._safe_read(2) # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return value
            else:
                value += self._safe_read(chunk_left)
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2) # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return value

    def readline(self, limit=-1):
        # buffer socket data until a newline (or *limit* bytes) is seen,
        # then split the buffer at that point
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new:
                break
            i = new.find('\n')
            if i >= 0:
                i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0:
            i = len(self._rbuf)
        else:
            i = i + 1
        if 0 <= limit < len(self._rbuf):
            i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data

    def readlines(self, sizehint=0):
        # collect lines until EOF or (roughly) *sizehint* bytes total
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
521 521
def safesend(self, str):
    """Send `str' to the server.

    Shamelessly ripped off from httplib to patch a bad behavior.
    """
    # _broken_pipe_resp is an attribute we set in this function
    # if the socket is closed while we're sending data but
    # the server sent us a response before hanging up.
    # In that case, we want to pretend to send the rest of the
    # outgoing data, and then let the user use getresponse()
    # (which we wrap) to get this last response before
    # opening a new socket.
    if getattr(self, '_broken_pipe_resp', None) is not None:
        return

    if self.sock is None:
        if self.auto_open:
            self.connect()
        else:
            raise httplib.NotConnected

    # send the data to the server. if we get a broken pipe, then close
    # the socket. we want to reconnect when somebody tries to send again.
    #
    # NOTE: we DO propagate the error, though, because we cannot simply
    # ignore the error... the caller will know if they can retry.
    if self.debuglevel > 0:
        print("send:", repr(str))
    try:
        blocksize = 8192
        # file-like payloads are streamed in blocks; anything else is
        # sent in one sendall() call
        read = getattr(str, 'read', None)
        if read is not None:
            if self.debuglevel > 0:
                print("sending a read()able")
            data = read(blocksize)
            while data:
                self.sock.sendall(data)
                data = read(blocksize)
        else:
            self.sock.sendall(str)
    except socket.error as v:
        reraise = True
        # NOTE(review): v[0] indexing is python 2 exception API
        if v[0] == errno.EPIPE: # Broken pipe
            if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                # the server hung up after the request line/headers went
                # out; grab its early response for wrapgetresponse()
                self._broken_pipe_resp = None
                self._broken_pipe_resp = self.getresponse()
                reraise = False
            self.close()
        if reraise:
            raise
572 572
def wrapgetresponse(cls):
    """Return a getresponse() replacement for *cls* that honors
    _broken_pipe_resp.

    safesend() may stash an early server response in the
    _broken_pipe_resp attribute after a broken pipe; the wrapper hands
    that cached response back instead of reading from the (already
    closed) socket.
    """
    def safegetresponse(self):
        cached = getattr(self, '_broken_pipe_resp', None)
        if cached is None:
            # normal path: delegate to the unwrapped implementation
            return cls.getresponse(self)
        return cached
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
587 587
class HTTPConnection(httplib.HTTPConnection):
    # use the modified response class
    response_class = HTTPResponse
    # broken-pipe-tolerant send/getresponse pair defined above
    send = safesend
    getresponse = wrapgetresponse(httplib.HTTPConnection)
593 593
594 594
595 595 #########################################################################
596 596 ##### TEST FUNCTIONS
597 597 #########################################################################
598 598
def error_handler(url):
    """Manual test: fetch *url* with HANDLE_ERRORS both off and on and
    print status/reason; restores HANDLE_ERRORS afterwards.

    Installs a keepalive opener globally as a side effect and closes
    all pooled connections before returning.
    """
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urlreq.buildopener(keepalive_handler)
    urlreq.installopener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print(" fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i))
        HANDLE_ERRORS = i
        try:
            fo = urlreq.urlopen(url)
            fo.read()
            fo.close()
            try:
                status, reason = fo.status, fo.reason
            except AttributeError:
                # status/reason only exist on 200 responses (see module
                # docstring)
                status, reason = None, None
        except IOError as e:
            print(" EXCEPTION: %s" % e)
            raise
        else:
            print(" status = %s, reason = %s" % (status, reason))
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print("open connections:", hosts)
    keepalive_handler.close_all()
626 626
def continuity(url):
    """Manual test: fetch *url* via the normal opener, then via the
    keepalive opener with read() and readline(), printing an md5 of
    each download so the three can be compared by eye."""
    md5 = hashlib.md5
    format = '%25s: %s'

    # first fetch the file with the normal http handler
    opener = urlreq.buildopener()
    urlreq.installopener(opener)
    fo = urlreq.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5(foo)
    print(format % ('normal urllib', m.hexdigest()))

    # now install the keepalive handler and try again
    opener = urlreq.buildopener(HTTPHandler())
    urlreq.installopener(opener)

    fo = urlreq.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5(foo)
    print(format % ('keepalive read', m.hexdigest()))

    # third pass exercises the readline() buffering path
    fo = urlreq.urlopen(url)
    foo = ''
    while True:
        f = fo.readline()
        if f:
            foo = foo + f
        else: break
    fo.close()
    m = md5(foo)
    print(format % ('keepalive readline', m.hexdigest()))
660 660
def comp(N, url):
    """Manual benchmark: time *N* fetches of *url* with the plain
    urllib opener vs the keepalive opener and print the speedup."""
    print(' making %i connections to:\n %s' % (N, url))

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urlreq.buildopener()
    urlreq.installopener(opener)
    t1 = fetch(N, url)
    print(' TIME: %.3f s' % t1)

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urlreq.buildopener(HTTPHandler())
    urlreq.installopener(opener)
    t2 = fetch(N, url)
    print(' TIME: %.3f s' % t2)
    print(' improvement factor: %.2f' % (t1 / t2))
678 678
def fetch(N, url, delay=0):
    """Fetch *url* N times (sleeping *delay* seconds between requests
    after the first) and return the total elapsed time in seconds.

    Prints a warning for every fetch whose byte length differs from
    the first fetch's length.
    """
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urlreq.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    # all responses should be the same size; warn on mismatches
    # (enumerate replaces the original hand-rolled counter)
    for j, length in enumerate(lens[1:], 1):
        if length != lens[0]:
            print("WARNING: inconsistent length on read %i: %i" % (j, length))

    return diff
699 699
def test_timeout(url):
    """Manual test: fetch *url*, wait 20s for the server to drop the
    idle connection, fetch again, and report whether the two downloads
    are identical. Temporarily installs a FakeLogger as DEBUG so the
    connection reuse/re-open path is traced.
    """
    global DEBUG
    # fix: `time` was only imported under the __main__ guard, so calling
    # this function from an importing module raised NameError at
    # time.sleep() below
    import time
    dbbackup = DEBUG
    class FakeLogger(object):
        def debug(self, msg, *args):
            print(msg % args)
        info = warning = error = debug
    DEBUG = FakeLogger()
    print("  fetching the file to establish a connection")
    fo = urlreq.urlopen(url)
    data1 = fo.read()
    fo.close()

    i = 20
    print("  waiting %i seconds for the server to close the connection" % i)
    while i > 0:
        sys.stdout.write('\r  %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    sys.stderr.write('\r')

    print("  fetching the file a second time")
    fo = urlreq.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print('  data are identical')
    else:
        print('  ERROR: DATA DIFFER')

    DEBUG = dbbackup
733 733
734 734
def test(url, N=10):
    """Run the whole manual test suite against *url*: error handling,
    continuity, speed comparison and dropped-connection behavior.

    Exits the process early if error_handler() raises IOError.
    """
    print("checking error handler (do this on a non-200)")
    try: error_handler(url)
    except IOError:
        print("exiting - exception will prevent further tests")
        sys.exit()
    print('')
    print("performing continuity test (making sure stuff isn't corrupted)")
    continuity(url)
    print('')
    print("performing speed comparison")
    comp(N, url)
    print('')
    print("performing dropped-connection check")
    test_timeout(url)
750 750
# command line driver: `python keepalive.py <N> <url>` runs the manual
# test suite with N fetches against the given url
if __name__ == '__main__':
    import time
    try:
        N = int(sys.argv[1])
        url = sys.argv[2]
    except (IndexError, ValueError):
        print("%s <integer> <url>" % sys.argv[0])
    else:
        test(url, N)
@@ -1,152 +1,159 b''
1 1 # pycompat.py - portability shim for python 3
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """Mercurial portability shim for python 3.
7 7
8 8 This contains aliases to hide python version-specific details from the core.
9 9 """
10 10
11 11 from __future__ import absolute_import
12 12
# Each shim below imports the Python 2 name first and falls back to the
# Python 3 location. The bare attribute access after each import is a cheap
# sanity check that the module is usable, and keeps pyflakes from flagging
# the import as unused.
try:
    import cPickle as pickle
    pickle.dumps
except ImportError:
    import pickle
    pickle.dumps # silence pyflakes

try:
    # Python 2 name; renamed to http.client in Python 3
    import httplib
    httplib.HTTPException
except ImportError:
    import http.client as httplib
    httplib.HTTPException

try:
    # Python 2 name; renamed to socketserver in Python 3
    import SocketServer as socketserver
    socketserver.ThreadingMixIn
except ImportError:
    import socketserver
    socketserver.ThreadingMixIn

try:
    # Python 2 name; renamed to xmlrpc.client in Python 3
    import xmlrpclib
    xmlrpclib.Transport
except ImportError:
    import xmlrpc.client as xmlrpclib
    xmlrpclib.Transport

try:
    # Python 2 name; renamed to urllib.parse in Python 3
    import urlparse
    urlparse.urlparse
except ImportError:
    import urllib.parse as urlparse
    urlparse.urlparse

try:
    # prefer the C implementation on Python 2; plain io on Python 3
    import cStringIO as io
    stringio = io.StringIO
except ImportError:
    import io
    stringio = io.StringIO

try:
    # Python 2 name; renamed to queue in Python 3
    import Queue as _queue
    _queue.Queue
except ImportError:
    import queue as _queue
empty = _queue.Empty
queue = _queue.Queue
55 62
class _pycompatstub(object):
    """Empty attribute container; _alias() populates instances of this with
    normalized names copied from a stdlib module."""
    pass
58 65
59 66 def _alias(alias, origin, items):
60 67 """ populate a _pycompatstub
61 68
62 69 copies items from origin to alias
63 70 """
64 71 def hgcase(item):
65 72 return item.replace('_', '').lower()
66 73 for item in items:
67 74 try:
68 75 setattr(alias, hgcase(item), getattr(origin, item))
69 76 except AttributeError:
70 77 pass
71 78
# Build the urlreq/urlerr stubs: on Python 2 they are filled from
# urllib + urllib2, on Python 3 from urllib.request + urllib.error, so the
# rest of the codebase can use one spelling (e.g. urlreq.urlopen).
urlreq = _pycompatstub()
urlerr = _pycompatstub()
try:
    import urllib2
    import urllib
    _alias(urlreq, urllib, (
        "addclosehook",
        "addinfourl",
        "ftpwrapper",
        "pathname2url",
        "quote",
        "splitattr",
        "splitpasswd",
        "splitport",
        "splituser",
        "unquote",
        "url2pathname",
        "urlencode",
        # NOTE(review): "urlencode" is listed twice -- harmless duplicate
        "urlencode",
    ))
    _alias(urlreq, urllib2, (
        "AbstractHTTPHandler",
        "BaseHandler",
        "build_opener",
        "FileHandler",
        "FTPHandler",
        "HTTPBasicAuthHandler",
        "HTTPDigestAuthHandler",
        "HTTPHandler",
        "HTTPPasswordMgrWithDefaultRealm",
        "HTTPSHandler",
        "install_opener",
        "ProxyHandler",
        "Request",
        "urlopen",
    ))
    _alias(urlerr, urllib2, (
        "HTTPError",
        "URLError",
    ))

except ImportError:
    # Python 3: everything lives under urllib.request / urllib.error
    import urllib.request
    _alias(urlreq, urllib.request, (
        "AbstractHTTPHandler",
        "addclosehook",
        "addinfourl",
        "BaseHandler",
        "build_opener",
        "FileHandler",
        "FTPHandler",
        "ftpwrapper",
        "HTTPHandler",
        "HTTPSHandler",
        "install_opener",
        "pathname2url",
        "HTTPBasicAuthHandler",
        "HTTPDigestAuthHandler",
        "HTTPPasswordMgrWithDefaultRealm",
        "ProxyHandler",
        "quote",
        "Request",
        "splitattr",
        "splitpasswd",
        "splitport",
        "splituser",
        "unquote",
        "url2pathname",
        "urlopen",
    ))
    import urllib.error
    _alias(urlerr, urllib.error, (
        "HTTPError",
        "URLError",
    ))
147 154
try:
    xrange
except NameError:
    # Python 3: make xrange a global alias for range so py2-style call
    # sites keep working
    import builtins
    builtins.xrange = range
@@ -1,522 +1,522 b''
1 1 # url.py - HTTP handling for mercurial
2 2 #
3 3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import base64
13 import httplib
14 13 import os
15 14 import socket
16 15
17 16 from .i18n import _
18 17 from . import (
19 18 error,
20 19 httpconnection as httpconnectionmod,
21 20 keepalive,
22 21 sslutil,
23 22 util,
24 23 )
24
# Python 2/3 compatibility names, resolved once by util/pycompat
httplib = util.httplib
stringio = util.stringio
urlerr = util.urlerr
urlreq = util.urlreq
29 29
class passwordmgr(object):
    """Password manager backed by a shared password database, falling back
    to [auth] configuration and finally to interactive prompting."""

    def __init__(self, ui, passwddb):
        self.ui = ui
        self.passwddb = passwddb

    def add_password(self, realm, uri, user, passwd):
        return self.passwddb.add_password(realm, uri, user, passwd)

    def find_user_password(self, realm, authuri):
        """Return (user, passwd) for authuri.

        Resolution order: stored credentials, then [auth] config sections,
        then interactive prompt. Raises error.Abort if credentials are
        still incomplete and the ui is non-interactive.
        """
        authinfo = self.passwddb.find_user_password(realm, authuri)
        user, passwd = authinfo
        if user and passwd:
            self._writedebug(user, passwd)
            return (user, passwd)

        if not user or not passwd:
            # try the [auth] config sections for this URI
            res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
            if res:
                group, auth = res
                user, passwd = auth.get('username'), auth.get('password')
                self.ui.debug("using auth.%s.* for authentication\n" % group)
        if not user or not passwd:
            # still incomplete: prompt the user (query is stripped so the
            # displayed URL doesn't leak parameters)
            u = util.url(authuri)
            u.query = None
            if not self.ui.interactive():
                raise error.Abort(_('http authorization required for %s') %
                                  util.hidepassword(str(u)))

            self.ui.write(_("http authorization required for %s\n") %
                          util.hidepassword(str(u)))
            self.ui.write(_("realm: %s\n") % realm)
            if user:
                self.ui.write(_("user: %s\n") % user)
            else:
                user = self.ui.prompt(_("user:"), default=None)

            if not passwd:
                passwd = self.ui.getpass()

        # remember whatever we ended up with for subsequent requests
        self.passwddb.add_password(realm, authuri, user, passwd)
        self._writedebug(user, passwd)
        return (user, passwd)

    def _writedebug(self, user, passwd):
        # never echo the actual password, only its length as asterisks
        msg = _('http auth: user %s, password %s\n')
        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))

    def find_stored_password(self, authuri):
        return self.passwddb.find_user_password(None, authuri)
79 79
class proxyhandler(urlreq.proxyhandler):
    """Proxy handler honoring Mercurial's [http_proxy] configuration as
    well as the http_proxy/no_proxy environment variables."""

    def __init__(self, ui):
        # config takes precedence over the environment variable
        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
        # XXX proxyauthinfo = None

        if proxyurl:
            # proxy can be proper url or host[:port]
            if not (proxyurl.startswith('http:') or
                    proxyurl.startswith('https:')):
                proxyurl = 'http://' + proxyurl + '/'
            proxy = util.url(proxyurl)
            if not proxy.user:
                proxy.user = ui.config("http_proxy", "user")
                proxy.passwd = ui.config("http_proxy", "passwd")

            # see if we should use a proxy for this url
            no_list = ["localhost", "127.0.0.1"]
            no_list.extend([p.lower() for
                            p in ui.configlist("http_proxy", "no")])
            no_list.extend([p.strip().lower() for
                            p in os.getenv("no_proxy", '').split(',')
                            if p.strip()])
            # "http_proxy.always" config is for running tests on localhost
            if ui.configbool("http_proxy", "always"):
                self.no_list = []
            else:
                self.no_list = no_list

            proxyurl = str(proxy)
            proxies = {'http': proxyurl, 'https': proxyurl}
            ui.debug('proxying through http://%s:%s\n' %
                     (proxy.host, proxy.port))
        else:
            proxies = {}

        # urllib2 takes proxy values from the environment and those
        # will take precedence if found. So, if there's a config entry
        # defining a proxy, drop the environment ones
        if ui.config("http_proxy", "host"):
            for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
                try:
                    if env in os.environ:
                        del os.environ[env]
                except OSError:
                    pass

        urlreq.proxyhandler.__init__(self, proxies)
        self.ui = ui

    def proxy_open(self, req, proxy, type_):
        """Bypass the proxy for hosts matching the no-proxy list; entries
        may be exact hosts, '*.suffix' or '.suffix' patterns."""
        host = req.get_host().split(':')[0]
        for e in self.no_list:
            if host == e:
                return None
            if e.startswith('*.') and host.endswith(e[2:]):
                return None
            if e.startswith('.') and host.endswith(e[1:]):
                return None

        return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
140 140
def _gen_sendfile(orgsend):
    """Wrap a connection's send() so that httpsendfile objects are
    streamed in chunks instead of being passed through whole."""
    def _sendfile(self, data):
        if not isinstance(data, httpconnectionmod.httpsendfile):
            orgsend(self, data)
            return
        # auth can force the body to be sent twice, so rewind first
        data.seek(0)
        for piece in util.filechunkiter(data):
            orgsend(self, piece)
    return _sendfile
152 152
# HTTPS support is only available when urllib exposes an https handler
has_https = util.safehasattr(urlreq, 'httpshandler')
if has_https:
    try:
        _create_connection = socket.create_connection
    except AttributeError:
        # pre-2.6 Python: provide our own fallback implementation
        _GLOBAL_DEFAULT_TIMEOUT = object()

        def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                               source_address=None):
            # lifted from Python 2.6

            msg = "getaddrinfo returns an empty list"
            host, port = address
            # try every resolved address until one connects
            for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    if source_address:
                        sock.bind(source_address)
                    sock.connect(sa)
                    return sock

                except socket.error as msg:
                    # 'msg' now holds the last failure, re-raised below
                    if sock is not None:
                        sock.close()

            raise socket.error(msg)
183 183
class httpconnection(keepalive.HTTPConnection):
    """Keepalive HTTP connection able to stream large request bodies and to
    tunnel through a CONNECT proxy."""
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection.send)

    def connect(self):
        if has_https and self.realhostport: # use CONNECT proxy
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((self.host, self.port))
            if _generic_proxytunnel(self):
                # we do not support client X.509 certificates
                self.sock = sslutil.wrapsocket(self.sock, None, None, None,
                                               serverhostname=self.host)
        else:
            keepalive.HTTPConnection.connect(self)

    def getresponse(self):
        # a response captured during CONNECT tunneling takes precedence
        proxyres = getattr(self, 'proxyres', None)
        if proxyres:
            if proxyres.will_close:
                self.close()
            self.proxyres = None
            return proxyres
        return keepalive.HTTPConnection.getresponse(self)
207 207
# general transaction handler to support different ways to handle
# HTTPS proxying before and after Python 2.6.3.
def _generic_start_transaction(handler, h, req):
    """Record CONNECT-proxy state (realhostport/headers) on connection h
    before the request is started."""
    # Python >= 2.6.3 exposes the tunnel target via req._tunnel_host
    tunnel_host = getattr(req, '_tunnel_host', None)
    if tunnel_host:
        if tunnel_host[:7] not in ['http://', 'https:/']:
            tunnel_host = 'https://' + tunnel_host
        new_tunnel = True
    else:
        # older Pythons: fall back to the request selector
        tunnel_host = req.get_selector()
        new_tunnel = False

    if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
        u = util.url(tunnel_host)
        if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
            h.realhostport = ':'.join([u.host, (u.port or '443')])
            h.headers = req.headers.copy()
            h.headers.update(handler.parent.addheaders)
            return

    # no CONNECT tunneling required
    h.realhostport = None
    h.headers = None
230 230
def _generic_proxytunnel(self):
    """Issue a CONNECT request over self.sock and parse the proxy's reply.

    Returns True when the tunnel was established (status 200); otherwise
    stores the parsed response on self.proxyres and returns False so the
    caller can surface the proxy's error to the user.
    """
    # forward only the Proxy-* headers to the proxy itself
    proxyheaders = dict(
        [(x, self.headers[x]) for x in self.headers
         if x.lower().startswith('proxy-')])
    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
    for header in proxyheaders.iteritems():
        self.send('%s: %s\r\n' % header)
    self.send('\r\n')

    # majority of the following code is duplicated from
    # httplib.HTTPConnection as there are no adequate places to
    # override functions to provide the needed functionality
    res = self.response_class(self.sock,
                              strict=self.strict,
                              method=self._method)

    while True:
        # skip interim 1xx CONTINUE responses (and their header blocks)
        version, status, reason = res._read_status()
        if status != httplib.CONTINUE:
            break
        while True:
            skip = res.fp.readline().strip()
            if not skip:
                break
    res.status = status
    res.reason = reason.strip()

    if res.status == 200:
        # tunnel established; consume the (empty) header block
        while True:
            line = res.fp.readline()
            if line == '\r\n':
                break
        return True

    if version == 'HTTP/1.0':
        res.version = 10
    elif version.startswith('HTTP/1.'):
        res.version = 11
    elif version == 'HTTP/0.9':
        res.version = 9
    else:
        raise httplib.UnknownProtocol(version)

    if res.version == 9:
        # HTTP/0.9 has no headers; the connection closes after the body
        res.length = None
        res.chunked = 0
        res.will_close = 1
        res.msg = httplib.HTTPMessage(stringio())
        return False

    res.msg = httplib.HTTPMessage(res.fp)
    res.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    trenc = res.msg.getheader('transfer-encoding')
    if trenc and trenc.lower() == "chunked":
        res.chunked = 1
        res.chunk_left = None
    else:
        res.chunked = 0

    # will the connection close at the end of the response?
    res.will_close = res._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
    # transfer-encoding is "chunked"
    length = res.msg.getheader('content-length')
    if length and not res.chunked:
        try:
            res.length = int(length)
        except ValueError:
            res.length = None
        else:
            if res.length < 0: # ignore nonsensical negative lengths
                res.length = None
    else:
        res.length = None

    # does the body have a fixed length? (of zero)
    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
        100 <= status < 200 or # 1xx codes
        res._method == 'HEAD'):
        res.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not res.will_close and
        not res.chunked and
        res.length is None):
        res.will_close = 1

    self.proxyres = res

    return False
327 327
class httphandler(keepalive.HTTPHandler):
    """Keepalive HTTP handler using our streaming-capable httpconnection."""
    def http_open(self, req):
        return self.do_open(httpconnection, req)

    def _start_transaction(self, h, req):
        # record CONNECT-proxy state on the connection before delegating
        _generic_start_transaction(self, h, req)
        return keepalive.HTTPHandler._start_transaction(self, h, req)
335 335
if has_https:
    class httpsconnection(httplib.HTTPConnection):
        """HTTPS connection with keepalive, streaming send, CONNECT-proxy
        support and certificate validation via sslutil."""
        response_class = keepalive.HTTPResponse
        default_port = httplib.HTTPS_PORT
        # must be able to send big bundle as stream.
        send = _gen_sendfile(keepalive.safesend)
        getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)

        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     *args, **kwargs):
            httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
            self.key_file = key_file
            self.cert_file = cert_file

        def connect(self):
            self.sock = _create_connection((self.host, self.port))

            host = self.host
            if self.realhostport: # use CONNECT proxy
                _generic_proxytunnel(self)
                # validate the certificate of the tunnel target, not the proxy
                host = self.realhostport.rsplit(':', 1)[0]
            self.sock = sslutil.wrapsocket(
                self.sock, self.key_file, self.cert_file, ui=self.ui,
                serverhostname=host)
            sslutil.validatesocket(self.sock)

    class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
        """HTTPS handler that picks key/cert files from [auth] config."""
        def __init__(self, ui):
            keepalive.KeepAliveHandler.__init__(self)
            urlreq.httpshandler.__init__(self)
            self.ui = ui
            self.pwmgr = passwordmgr(self.ui,
                                     self.ui.httppasswordmgrdb)

        def _start_transaction(self, h, req):
            # record CONNECT-proxy state on the connection before delegating
            _generic_start_transaction(self, h, req)
            return keepalive.KeepAliveHandler._start_transaction(self, h, req)

        def https_open(self, req):
            # req.get_full_url() does not contain credentials and we may
            # need them to match the certificates.
            url = req.get_full_url()
            user, password = self.pwmgr.find_stored_password(url)
            res = httpconnectionmod.readauthforuri(self.ui, url, user)
            if res:
                group, auth = res
                self.auth = auth
                self.ui.debug("using auth.%s.* for authentication\n" % group)
            else:
                self.auth = None
            return self.do_open(self._makeconnection, req)

        def _makeconnection(self, host, port=None, *args, **kwargs):
            keyfile = None
            certfile = None

            if len(args) >= 1: # key_file
                keyfile = args[0]
            if len(args) >= 2: # cert_file
                certfile = args[1]
            args = args[2:]

            # if the user has specified different key/cert files in
            # hgrc, we prefer these
            if self.auth and 'key' in self.auth and 'cert' in self.auth:
                keyfile = self.auth['key']
                certfile = self.auth['cert']

            conn = httpsconnection(host, port, keyfile, certfile, *args,
                                   **kwargs)
            conn.ui = self.ui
            return conn
408 408
class httpdigestauthhandler(urlreq.httpdigestauthhandler):
    """Digest auth handler with retry counting fixed for Python 2.6.5."""
    def __init__(self, *args, **kwargs):
        urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
        self.retried_req = None

    def reset_retry_count(self):
        # Python 2.6.5 will call this on 401 or 407 errors and thus loop
        # forever. We disable reset_retry_count completely and reset in
        # http_error_auth_reqed instead.
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        # Reset the retry counter once for each request.
        if req is not self.retried_req:
            self.retried_req = req
            self.retried = 0
        return urlreq.httpdigestauthhandler.http_error_auth_reqed(
            self, auth_header, host, req, headers)
427 427
class httpbasicauthhandler(urlreq.httpbasicauthhandler):
    """Basic auth handler that pre-sends cached credentials and fixes the
    Python 2.6.5 retry-count loop (see httpdigestauthhandler)."""
    def __init__(self, *args, **kwargs):
        self.auth = None
        urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
        self.retried_req = None

    def http_request(self, request):
        # preemptively attach the last successful Authorization header
        if self.auth:
            request.add_unredirected_header(self.auth_header, self.auth)

        return request

    def https_request(self, request):
        if self.auth:
            request.add_unredirected_header(self.auth_header, self.auth)

        return request

    def reset_retry_count(self):
        # Python 2.6.5 will call this on 401 or 407 errors and thus loop
        # forever. We disable reset_retry_count completely and reset in
        # http_error_auth_reqed instead.
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        # Reset the retry counter once for each request.
        if req is not self.retried_req:
            self.retried_req = req
            self.retried = 0
        return urlreq.httpbasicauthhandler.http_error_auth_reqed(
            self, auth_header, host, req, headers)

    def retry_http_basic_auth(self, host, req, realm):
        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if pw is not None:
            raw = "%s:%s" % (user, pw)
            auth = 'Basic %s' % base64.b64encode(raw).strip()
            if req.headers.get(self.auth_header, None) == auth:
                # same credentials already failed: give up instead of looping
                return None
            self.auth = auth
            req.add_unredirected_header(self.auth_header, auth)
            return self.parent.open(req)
        else:
            return None
472 472
# extensions may append extra handler factories (called as func(ui, passmgr))
handlerfuncs = []

def opener(ui, authinfo=None):
    '''
    construct an opener suitable for urllib2
    authinfo will be added to the password manager
    '''
    # experimental config: ui.usehttp2
    if ui.configbool('ui', 'usehttp2', False):
        handlers = [
            httpconnectionmod.http2handler(
                ui,
                passwordmgr(ui, ui.httppasswordmgrdb))
        ]
    else:
        handlers = [httphandler()]
        if has_https:
            handlers.append(httpshandler(ui))

    handlers.append(proxyhandler(ui))

    passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
    if authinfo is not None:
        realm, uris, user, passwd = authinfo
        saveduser, savedpass = passmgr.find_stored_password(uris[0])
        # only overwrite stored credentials when they actually changed
        if user != saveduser or passwd:
            passmgr.add_password(realm, uris, user, passwd)
        ui.debug('http auth: user %s, password %s\n' %
                 (user, passwd and '*' * len(passwd) or 'not set'))

    handlers.extend((httpbasicauthhandler(passmgr),
                     httpdigestauthhandler(passmgr)))
    handlers.extend([h(ui, passmgr) for h in handlerfuncs])
    # note: local 'opener' intentionally shadows this function's name
    opener = urlreq.buildopener(*handlers)

    # 1.0 here is the _protocol_ version
    opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
    opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
    return opener
512 512
def open(ui, url_, data=None):
    """Open url_ -- either a real URL or a local filesystem path -- through
    a Mercurial-configured opener and return the response object."""
    parsed = util.url(url_)
    if not parsed.scheme:
        # bare local path: convert to a file:// URL, no auth involved
        localpath = util.normpath(os.path.abspath(url_))
        url_ = 'file://' + urlreq.pathname2url(localpath)
        return opener(ui, None).open(url_, data)
    parsed.scheme = parsed.scheme.lower()
    url_, authinfo = parsed.authinfo()
    return opener(ui, authinfo).open(url_, data)
@@ -1,2856 +1,2857 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import zlib
38 38
39 39 from . import (
40 40 encoding,
41 41 error,
42 42 i18n,
43 43 osutil,
44 44 parsers,
45 45 pycompat,
46 46 )
47 47
# Re-export the Python 2/3 compatibility aliases from pycompat into this
# module's namespace so callers can write e.g. util.httplib / util.queue.
for attr in (
    'empty',
    'httplib',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    globals()[attr] = getattr(pycompat, attr)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
64 65
65 66 if os.name == 'nt':
66 67 from . import windows as platform
67 68 else:
68 69 from . import posix as platform
69 70
_ = i18n._

# Re-export the platform-specific implementations (posix or windows module,
# chosen above) under stable names so the rest of the codebase never needs
# to know which platform it is running on.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# the C osutil module may provide a faster statfiles; fall back to platform
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
# Python compatibility

# sentinel distinguishing "attribute missing" from any real value (incl. None)
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
# NOTE(review): os.stat_float_times was removed in Python 3.7; this call is
# only valid on the Python 2 line this code targets.
os.stat_float_times(False)
135 136
def safehasattr(thing, attr):
    """Like hasattr(), but only treats AttributeError as "missing": other
    exceptions raised while fetching the attribute propagate, whereas py2's
    hasattr() would swallow them."""
    return getattr(thing, attr, _notset) is not _notset
138 139
# digest name -> hashlib constructor
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of names from DIGESTS; s: optional initial data
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed data into every tracked hash
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: this previously formatted '% k', picking up the stale
            # module-level loop variable above instead of the offending key
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size          # expected total byte count
        self._got = 0              # bytes read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Raise Abort unless both the size and every digest match."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
228 229
# provide a buffer() builtin everywhere: py2 has it natively; on py3 emulate
# it with a slice (py2 fallback path) or a zero-copy memoryview
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# close inherited fds in child processes on posix only
closefds = os.name == 'posix'

# read size used by bufferedinputpipe
_chunksize = 4096
242 243
243 244 class bufferedinputpipe(object):
244 245 """a manually buffered input pipe
245 246
246 247 Python will not let us use buffered IO and lazy reading with 'polling' at
247 248 the same time. We cannot probe the buffer state and select will not detect
248 249 that data are ready to read if they are already buffered.
249 250
250 251 This class let us work around that by implementing its own buffering
251 252 (allowing efficient readline) while offering a way to know if the buffer is
252 253 empty from the output (allowing collaboration of the buffer with polling).
253 254
254 255 This class lives in the 'util' module because it makes use of the 'os'
255 256 module from the python stdlib.
256 257 """
257 258
258 259 def __init__(self, input):
259 260 self._input = input
260 261 self._buffer = []
261 262 self._eof = False
262 263 self._lenbuf = 0
263 264
264 265 @property
265 266 def hasbuffer(self):
266 267 """True is any data is currently buffered
267 268
268 269 This will be used externally a pre-step for polling IO. If there is
269 270 already data then no polling should be set in place."""
270 271 return bool(self._buffer)
271 272
272 273 @property
273 274 def closed(self):
274 275 return self._input.closed
275 276
276 277 def fileno(self):
277 278 return self._input.fileno()
278 279
279 280 def close(self):
280 281 return self._input.close()
281 282
282 283 def read(self, size):
283 284 while (not self._eof) and (self._lenbuf < size):
284 285 self._fillbuffer()
285 286 return self._frombuffer(size)
286 287
287 288 def readline(self, *args, **kwargs):
288 289 if 1 < len(self._buffer):
289 290 # this should not happen because both read and readline end with a
290 291 # _frombuffer call that collapse it.
291 292 self._buffer = [''.join(self._buffer)]
292 293 self._lenbuf = len(self._buffer[0])
293 294 lfi = -1
294 295 if self._buffer:
295 296 lfi = self._buffer[-1].find('\n')
296 297 while (not self._eof) and lfi < 0:
297 298 self._fillbuffer()
298 299 if self._buffer:
299 300 lfi = self._buffer[-1].find('\n')
300 301 size = lfi + 1
301 302 if lfi < 0: # end of file
302 303 size = self._lenbuf
303 304 elif 1 < len(self._buffer):
304 305 # we need to take previous chunks into account
305 306 size += self._lenbuf - len(self._buffer[-1])
306 307 return self._frombuffer(size)
307 308
    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so a single slice below suffices
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # leftover stays as the single buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data
327 328
    def _fillbuffer(self):
        """read data to the buffer"""
        # a zero-length read means the writing end closed the pipe
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
336 337
def popen2(cmd, env=None, newlines=False):
    '''Run cmd through a shell and return its (stdin, stdout) pipes.

    bufsize=-1 lets the system pick the buffer size; the default of 0
    (unbuffered) performs poorly on Mac OS X, see
    http://bugs.python.org/issue4194
    '''
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
347 348
def popen3(cmd, env=None, newlines=False):
    '''Run cmd through a shell; return its (stdin, stdout, stderr) pipes.'''
    pipes = popen4(cmd, env, newlines)
    # drop the Popen object that popen4 also returns
    return pipes[0], pipes[1], pipes[2]
351 352
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    '''Run cmd through a shell; return (stdin, stdout, stderr, Popen).'''
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
360 361
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # not running from an installed/built package
        return 'unknown'
    return __version__.version
368 369
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is opaque "extra" information
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad to at least three components: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
421 422
# used by parsedate
defaultdateformats = (
    # ISO-style, full and partial
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    # ctime-style and RFC-2822-style
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S',  # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    # time of day only
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# broader formats accepted when parsing date ranges/specs
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
456 457
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: remember the single result in a one-slot list
        memo = []
        def f():
            if not memo:
                memo.append(func())
            return memo[0]
        return f
    memo = {}
    if argcount == 1:
        # single argument: key the cache on the argument itself, which
        # avoids packing/unpacking an args tuple on every call
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
482 483
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves
    it to the end, as if freshly inserted.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() rather than iteritems() so plain dicts keep working
            # once this file runs under Python 3
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # capture and return the popped value (it was previously
        # discarded), matching the contract of dict.pop
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned above
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
527 528
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; caches may hold many nodes
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # the module-level _notset sentinel marks an empty (recyclable) slot
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
546 547
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # circular doubly linked list, initially one empty node
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value for ``k``, or ``default`` if not cached.

        Mirrors ``__getitem__``: a hit returns the stored value (not the
        internal linked-list node, as was previously and incorrectly
        returned) and refreshes the entry's recency.
        """
        try:
            node = self._cache[k]
        except KeyError:
            return default
        self._movetohead(node)
        return node.value

    def clear(self):
        # walk the ring, emptying every occupied node
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
705 706
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    order = collections.deque()
    maxsize = 20
    if func.__code__.co_argcount == 1:
        # single-argument fast path: key on the bare argument
        def f(arg):
            if arg not in results:
                if len(results) > maxsize:
                    # evict the least recently used entry
                    del results[order.popleft()]
                results[arg] = func(arg)
            else:
                # refresh recency
                order.remove(arg)
            order.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args not in results:
                if len(results) > maxsize:
                    del results[order.popleft()]
                results[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return results[args]

    return f
732 733
class propertycache(object):
    """Non-data descriptor caching the computed value on the instance.

    The first attribute access runs the wrapped function and stores the
    result in the instance's __dict__, which shadows this descriptor for
    all subsequent lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
745 746
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd,
                            shell=True,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
752 753
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # on OpenVMS an odd exit status signals success
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
786 787
# maps a "scheme:" command prefix to the function implementing it; used
# by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
791 792
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # items() rather than iteritems() so this keeps working on Python 3,
    # where dict.iteritems() no longer exists (the table has two entries,
    # so the py2 list-vs-iterator difference is irrelevant)
    for name, fn in filtertable.items():
        if cmd.startswith(name):
            # strip the scheme prefix and any leading whitespace
            return fn(s, cmd[len(name):].lstrip())
    # no scheme prefix: default to piping through a shell command
    return pipefilter(s, cmd)
798 799
def binary(s):
    """return true if a string is binary data"""
    # an embedded NUL byte is the heuristic; empty/None input is not binary
    if not s:
        return False
    return '\0' in s
802 803
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for positive x; 0 when x is 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    nbuffered = 0
    for chunk in source:
        pending.append(chunk)
        nbuffered += len(chunk)
        if nbuffered >= min:
            if min < max:
                # double the threshold, but jump straight to the largest
                # power of two not exceeding what was just buffered,
                # capped at max
                min = min << 1
                nmin = 1 << log2(nbuffered)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            nbuffered = 0
            pending = []
    if pending:
        # final, possibly undersized chunk
        yield ''.join(pending)
833 834
834 835 Abort = error.Abort
835 836
def always(fn):
    """Predicate accepting any input."""
    return True
838 839
def never(fn):
    """Predicate rejecting any input."""
    return False
841 842
def nogc(func):
    """Decorator running ``func`` with the garbage collector disabled.

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated, even when marked not to be tracked by the collector. Tracking
    has no effect on when GCs are triggered, only on what objects the GC
    looks into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had it enabled
            if wasenabled:
                gc.enable()
    return wrapper
863 864
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # n1 lives on a different drive than root: a relative path cannot
        # cross drives, so return an absolute path instead
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
889 890
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # NOTE(review): the imp module is deprecated on Python 3; this will
    # eventually need an importlib-based replacement
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
899 900
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled lazily by hgexecutable()
_hgexecutable = None
910 911
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running directly from the 'hg' script
            _sethgexecutable(mainmod.__file__)
        else:
            # search PATH, falling back to however we were invoked
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
933 934
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # updates the module-level cache read by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
938 939
def _isstdout(f):
    """True if file object ``f`` refers to the process's real stdout."""
    getfd = getattr(f, 'fileno', None)
    if not getfd:
        # no fileno() method: cannot be the real stdout
        return getfd
    return getfd() == sys.__stdout__.fileno()
942 943
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our stdout so the child's output is not interleaved oddly
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # let the child write directly to our stdout/stderr
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # pump the child's combined stdout/stderr into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # on OpenVMS an odd exit status signals success
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1001 1002
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means func itself never ran: the
            # call failed while binding arguments, i.e. a signature error
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) != 1:
                raise
            raise error.SignatureError

    return check
1013 1014
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # ('if False and ...' deliberately disables the branch below)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one;
                    # nudge mtime forward (clamped to 31 bits) to break the tie
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1058 1059
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was (still)
    in effect, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # hardlinks only work within one filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by what we've already counted
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a link fails, fall back to copying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1095 1096
# base filenames Windows reserves for devices (checked case-insensitively
# against the part of a name before the first '.')
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may not appear anywhere in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting both separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters are forbidden
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # string containment trick: '.' and '..' are both substrings of
        # '..', so those special directory names are deliberately exempt
        # from the trailing-character check
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1146 1147
# on Windows every filename must satisfy the Windows rules; elsewhere
# defer to the platform module (a no-op on plain POSIX)
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1151 1152
def makelock(info, pathname):
    """Create a lock at ``pathname`` whose content is ``info``.

    Tries a symlink first; if one already exists (EEXIST) the lock is
    held and the error is re-raised. Platforms without os.symlink fall
    through to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall back to the file-based scheme below
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1164 1165
def readlock(pathname):
    """Return the content of the lock at ``pathname``.

    Reads the symlink target when the lock is a symlink, otherwise the
    file's contents (the counterpart of makelock's fallback).
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1177 1178
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-like object without a descriptor: stat it by name instead
        return os.stat(fp.name)
1184 1185
1185 1186 # File system features
1186 1187
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
        if folded == basename:
            # the name doesn't fold at all: no evidence against sensitivity
            return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded twin doesn't exist: the filesystem distinguishes case
        return True
    # same underlying file means the filesystem folded the name for us
    if st2 == st1:
        return False
    return True
1209 1210
# _re2 is tri-state: None = re2 imported but not yet probed, False =
# unavailable/unusable, True = usable; the probe runs lazily in
# _re._checkre2() below
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1215 1216
class _re(object):
    """Facade selecting between re2 (when usable) and the stdlib re."""
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument: encode them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2: fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1258 1259
# the singleton used throughout as util.re
re = _re()

# per-directory cache of {normcased name: on-disk name}, kept by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the result was previously
    # discarded, leaving '\' unescaped inside the character classes
    # below (so a lone '\' was swallowed by the class as an escape).
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1303 1304
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with a separate scratch file to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe1 = testfile + ".hgtmp1"
    probe2 = testfile + ".hgtmp2"
    if os.path.lexists(probe1):
        return False
    try:
        posixfile(probe1, 'w').close()
    except IOError:
        return False

    fd = None
    try:
        oslink(probe1, probe2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(probe2)
        return nlinks(probe2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        for scratch in (probe1, probe2):
            try:
                os.unlink(scratch)
            except OSError:
                pass
1335 1336
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # on platforms without an alternate separator this is falsy (None)
    return os.altsep and path.endswith(os.altsep)
1339 1340
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
1347 1348
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    elif getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    else:
        # pure build; use a safe default
        return True
1362 1363
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirpart, base = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % base, dir=dirpart)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            src = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the fresh empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dst = posixfile(temp, "wb")
        for chunk in filechunkiter(src):
            dst.write(chunk)
        src.close()
        dst.close()
    except: # re-raises
        # don't leave a stale temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1401 1402
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # record the absence of the file instead of failing
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparision of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            if self.stat.st_size != old.stat.st_size:
                return False
            if self.stat.st_ctime != old.stat.st_ctime:
                return False
            return self.stat.st_mtime == old.stat.st_mtime
        except AttributeError:
            # either side has no stat (missing file, or foreign peer)
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
1467 1468
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Commit: rename the temporary copy over the permanent name.

        With checkambig, nudge mtime forward by one second when the
        renamed file's stat would be indistinguishable from the old one
        (see filestat.isambig for the rationale).
        """
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only stat the old file when the caller asked for ambiguity
            # checking; oldstat is False otherwise
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Abort: drop the temporary copy, leaving the target untouched."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard when the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1530 1531
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST:
            # already present; leave its mode alone
            return
        if inst.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create the ancestry first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as inst:
            # Catch EEXIST to handle races
            if inst.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1558 1559
def readfile(path):
    """Return the entire content of *path* as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1562 1563
def writefile(path, text):
    """Replace the content of *path* with the bytes *text*."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1566 1567
def appendfile(path, text):
    """Append the bytes *text* to *path*, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1570 1571
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so a single
            # huge input chunk cannot dominate memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with up to ~256KB of pending chunks
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1651 1652
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # a zero-byte request short-circuits without touching the file
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1672 1673
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local offset = UTC wall clock minus local wall clock
    utcdt = datetime.datetime.utcfromtimestamp(timestamp)
    localdt = datetime.datetime.fromtimestamp(timestamp)
    delta = utcdt - localdt
    return timestamp, delta.days * 86400 + delta.seconds
1685 1686
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand timezone placeholders: %1 -> signed hours, %2 -> minutes
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp the shifted time into the signed 32-bit range
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    shifted = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return shifted.strftime(format)
1721 1722
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    return datestr(date, format='%Y-%m-%d')
1725 1726
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # numeric form: +HHMM / -HHMM; internal offsets are negated
        # relative to the printed sign
        sign = 1 if tz[0] == "+" else -1
        hours, minutes = int(tz[1:3]), int(tz[3:5])
        return -sign * 60 * (hours * 60 + minutes)
    if tz in ("GMT", "UTC"):
        return 0
    return None
1736 1737
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    date = string
    offset = parsetimezone(string.split()[-1])
    if offset is not None:
        # last token was a timezone; strip it from the date text
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + spec) in format for spec in part):
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1766 1767
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates understood in the user's locale as well
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses (for/else: none did)
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1843 1844
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields toward the start of the period
        bias = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, bias)[0]

    def upper(date):
        # round unspecified fields toward the end of the period; probe
        # month lengths from longest to shortest
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for daycount in ("31", "30", "29"):
            try:
                bias["d"] = daycount
                return parsedate(date, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(date, extendeddateformats, bias)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    prefix, rest = date[0], date[1:]
    if prefix == "<":
        if not rest:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(rest)
        return lambda x: x <= when
    if prefix == ">":
        if not rest:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(rest)
        return lambda x: x >= when
    if prefix == "-":
        try:
            days = int(rest)
        except ValueError:
            raise Abort(_("invalid day spec: %s") % rest)
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % rest)
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: start <= x <= stop
    start, stop = lower(date), upper(date)
    return lambda x: start <= x <= stop
1919 1920
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexsrc = pattern[3:]
        try:
            compiled = remod.compile(regexsrc)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexsrc, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1958 1959
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain, keep what follows '<', then trim at the first
    # space or dot
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
1974 1975
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then anything up to and including '<'
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    return user
1984 1985
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with neither present this returns the
    # whole string (find() yielding -1 + 1 == 0, end of None)
    end = author.find('>')
    return author[author.find('<') + 1:end if end != -1 else None]
1991 1992
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegated to encoding.trim, which appends '...' when trimming
    return encoding.trim(text, maxlength, ellipsis='...')
1995 1996
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # pick the first row whose threshold (divisor * multiplier) fits;
        # rows are expected most-significant first
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the last (smallest) unit
        return unittable[-1][2] % count

    return render
2006 2007
# render a byte count with the most natural unit; rules are ordered from
# most to least significant so the first matching threshold wins
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2019 2020
def uirepr(s):
    """repr() variant for UI output that collapses doubled backslashes
    so Windows paths stay readable."""
    r = repr(s)
    # Avoid double backslash in Windows path repr()
    return r.replace('\\\\', '\\')
2023 2024
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first part occupies at most space_left
            # display columns (per encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache: rebind the module-level name to the class itself so the
    # factory body (and the textwrap import cost) runs only once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2127 2128
def wrap(line, width, initindent='', hangindent=''):
    """Wrap *line* (local-encoding bytes) to *width* display columns,
    applying *initindent* to the first line and *hangindent* to the rest."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=decode(initindent),
                            subsequent_indent=decode(hangindent))
    return wrapper.fill(decode(line)).encode(encoding.encoding)
2140 2141
def iterlines(iterator):
    """Yield every text line from an iterator of multi-line chunks."""
    for piece in iterator:
        for ln in piece.splitlines():
            yield ln
2145 2146
def expandpath(path):
    """Expand environment variables, then ~user, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2148 2149
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2163 2164
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        # install the reaper only where SIGCHLD exists (not on Windows)
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' never matches a bare pid; death seems
            # to be effectively detected via testpid() — confirm intent.
            # condfn() is re-checked to close the race where the child
            # dies right after satisfying the condition.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2198 2199
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda value: value)
    # alternation built from the keys as given; callers supply
    # regex-safe keys
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        # a doubled prefix expands to the prefix character itself
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    def replace(match):
        # strip the single leading prefix character from the match
        return fn(mapping[match.group()[1:]])
    return matcher.sub(replace, s)
2223 2224
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric; fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2240 2241
# recognized spellings of boolean config values (always compared lowercased)
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2251 2252
2252 2253 _hexdig = '0123456789ABCDEFabcdef'
2253 2254 _hextochr = dict((a + b, chr(int(a + b, 16)))
2254 2255 for a in _hexdig for b in _hexdig)
2255 2256
2256 2257 def _urlunquote(s):
2257 2258 """Decode HTTP/HTML % encoding.
2258 2259
2259 2260 >>> _urlunquote('abc%20def')
2260 2261 'abc def'
2261 2262 """
2262 2263 res = s.split('%')
2263 2264 # fastpath
2264 2265 if len(res) == 1:
2265 2266 return s
2266 2267 s = res[0]
2267 2268 for item in res[1:]:
2268 2269 try:
2269 2270 s += _hextochr[item[:2]] + item[2:]
2270 2271 except KeyError:
2271 2272 s += '%' + item
2272 2273 except UnicodeDecodeError:
2273 2274 s += unichr(int(item[:2], 16)) + item[2:]
2274 2275 return s
2275 2276
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting the path component
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # debugging form listing only the components that are set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # bracketed IPv6 literals must not be %-quoted
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # returns (url-without-credentials, urllib2-style authinfo tuple)
        user, passwd = self.user, self.passwd
        try:
            # temporarily hide credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2562 2563
def hasscheme(path):
    """Report whether path parses as a URL with an explicit scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2565 2566
def hasdriveletter(path):
    """Report whether path begins with a Windows drive letter like 'c:'."""
    return path and path[0:1].isalpha() and path[1:2] == ':'
2568 2569
def urllocalpath(path):
    """Return the local filesystem path for a URL-ish path string."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2571 2572
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2578 2579
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2584 2585
def isatty(fp):
    """Return fp.isatty(), treating objects without an isatty() method
    as not being a tty."""
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2590 2591
# human-readable formatter for a duration given in seconds.
# NOTE(review): unitcountfn is defined earlier in this module; each tuple
# appears to be (threshold-factor, unit-divisor, format) — confirm against
# its definition before relying on the exact selection rule.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2606 2607
# current indentation depth for nested @timed reports (module-global,
# not thread-safe; development aid only)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            # indent proportionally to the remaining nesting depth
            sys.stderr.write('%s%s: %s\n'
                             % (' ' * _timenesting[0], func.__name__,
                                timecount(duration)))
    return wrapper
2633 2634
# recognized size suffixes; plain 'b' must come last so that 'kb', 'mb'
# and 'gb' get a chance to match first
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no suffix: a bare (integer) byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2655 2656
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hook) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so the invocation order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [fn(*args) for _source, fn in self._hooks]
2673 2674
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop this frame plus the requested number of caller frames
    stack = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in stack]
    if not entries:
        return
    width = max(len(location) for location, _func in entries)
    for location, funcname in entries:
        if line is None:
            yield (width, location, funcname)
        else:
            yield line % (width, location, funcname)
2695 2696
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the trace does not interleave
        # with buffered output
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides debugstacktrace's own frame
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2708 2709
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of additions referencing it (see addpath)
        self._dirs = {}
        addpath = self.addpath
        # dirstate-like mappings (those with iteritems) can filter out
        # entries whose state character equals 'skip'
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        # Count each ancestor of path, deepest first. As soon as an
        # ancestor is already present, its own ancestors were counted
        # when it was first added, so bump it once and stop.
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # Inverse of addpath: decrement the deepest ancestor that is
        # still shared; delete ancestors whose count would drop to zero.
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        # py2 idiom; iterates the directory names
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2744 2745
# prefer the C implementation of dirs when the extension module offers one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2747 2748
def finddirs(path):
    """Yield every ancestor directory of a '/'-separated path, deepest
    first (e.g. 'a/b/c' -> 'a/b', then 'a')."""
    parts = path.split('/')
    for end in range(len(parts) - 1, 0, -1):
        yield '/'.join(parts[:end])
2753 2754
2754 2755 # compression utility
2755 2756
class nocompress(object):
    """Identity 'compressor': data passes through unchanged."""

    def compress(self, x):
        return x

    def flush(self):
        # nothing buffered, so nothing to emit
        return ""
2761 2762
# compression-engine name -> zero-argument factory returning an object
# with compress(data)/flush() methods
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2770 2771
def _makedecompressor(decompcls):
    """Build a function mapping a compressed file object to a chunkbuffer
    of decompressed data; decompcls() must yield a decompressor object."""
    def generator(f):
        decomp = decompcls()
        for chunk in filechunkiter(f):
            yield decomp.decompress(chunk)

    def func(fh):
        return chunkbuffer(generator(fh))

    return func
2779 2780
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this handler suppressed the exception; subsequent
                    # handlers see no active exception
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # an exit handler itself raised: remember the most recent
                # exception and keep running the remaining handlers
                # (a redundant duplicate assignment of sys.exc_info() was
                # removed here)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2839 2840
2840 2841 def _bz2():
2841 2842 d = bz2.BZ2Decompressor()
2842 2843 # Bzip2 stream start with BZ, but we stripped it.
2843 2844 # we put it back for good measure.
2844 2845 d.decompress('BZ')
2845 2846 return d
2846 2847
# compression-engine name -> function(fileobj) returning a chunkbuffer
# of decompressed data; '_truncatedBZ' handles bz2 streams whose 'BZ'
# magic was stripped (see _bz2 above)
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2854 2855
# convenient shortcut for interactive debugging sessions
dst = debugstacktrace
@@ -1,78 +1,83 b''
1 1 #!/usr/bin/env python
2 2
3 3 """This does HTTP GET requests given a host:port and path and returns
4 4 a subset of the headers plus the body of the result."""
5 5
6 6 from __future__ import absolute_import, print_function
7 7
8 import httplib
9 8 import json
10 9 import os
11 10 import sys
12 11
12 from mercurial import (
13 util,
14 )
15
16 httplib = util.httplib
17
try:
    import msvcrt
    # Windows: switch stdout/stderr to binary mode so the HTTP payload
    # bytes are not mangled by CRLF translation
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
    # not on Windows; nothing to do
    pass
19 24
# script-level flags, consumed (removed) from argv before the positional
# host/path/header arguments are read
twice = False          # issue the request a second time (ETag exercise)
if '--twice' in sys.argv:
    sys.argv.remove('--twice')
    twice = True
headeronly = False     # print only status + headers, skip the body
if '--headeronly' in sys.argv:
    sys.argv.remove('--headeronly')
    headeronly = True
formatjson = False     # pretty-print the body as JSON
if '--json' in sys.argv:
    sys.argv.remove('--json')
    formatjson = True
# ETag remembered from the previous response (set when --twice is active)
tag = None
def request(host, path, show):
    """Issue one GET for /<path> on host; print the status line, the
    selected headers and (unless --headeronly) the body.

    show lists the header names to print; if its first entry starts
    with '-', show is instead treated as a blacklist and all response
    headers not listed are printed, sorted. Returns the HTTP status.
    """
    assert not path.startswith('/'), path
    global tag
    headers = {}
    if tag:
        # a previous response carried an ETag: make a conditional GET
        headers['If-None-Match'] = tag

    conn = httplib.HTTPConnection(host)
    conn.request("GET", '/' + path, None, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    if show[:1] == ['-']:
        # blacklist mode: print everything not named in show
        show = sorted(h for h, v in response.getheaders()
                      if h.lower() not in show)
    for h in [h.lower() for h in show]:
        if response.getheader(h, None) is not None:
            print("%s: %s" % (h, response.getheader(h)))
    if not headeronly:
        print()
        data = response.read()

        # Pretty print JSON. This also has the beneficial side-effect
        # of verifying emitted JSON is well-formed.
        if formatjson:
            # json.dumps() will print trailing newlines. Eliminate them
            # to make tests easier to write.
            data = json.loads(data)
            lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
            for line in lines:
                print(line.rstrip())
        else:
            sys.stdout.write(data)

    if twice and response.getheader('ETag', None):
        tag = response.getheader('ETag')

    return response.status
71 76
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
    # second identical request exercises the If-None-Match/ETag path
    status = request(sys.argv[1], sys.argv[2], sys.argv[3:])

# exit 0 for any success/redirect-ish status, 1 otherwise
if 200 <= status <= 305:
    sys.exit(0)
sys.exit(1)
@@ -1,150 +1,150 b''
1 1 #require test-repo
2 2
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4 $ cd "$TESTDIR"/..
5 5
6 6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 11 hgext/highlight/__init__.py not using absolute_import
12 12 hgext/highlight/highlight.py not using absolute_import
13 13 hgext/share.py not using absolute_import
14 14 hgext/win32text.py not using absolute_import
15 15 i18n/check-translation.py not using absolute_import
16 16 i18n/polib.py not using absolute_import
17 17 setup.py not using absolute_import
18 18 tests/heredoctest.py requires print_function
19 19 tests/md5sum.py not using absolute_import
20 20 tests/readlink.py not using absolute_import
21 21 tests/run-tests.py not using absolute_import
22 22 tests/test-demandimport.py not using absolute_import
23 23
24 24 #if py3exe
25 25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
26 26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
27 27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
28 28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
29 29 hgext/bugzilla.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
30 30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
31 31 hgext/chgserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
32 32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
35 35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
36 36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
37 37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
38 38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
39 39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
40 40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
44 44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
45 45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
47 47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
48 48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
49 49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
50 50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
51 51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
52 52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
53 53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
54 54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
57 57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
58 58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
59 59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
60 60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
62 62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
63 63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
64 hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
64 hgext/largefiles/proto.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at httppeer.py:*) (glob)
65 65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
66 66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
67 67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
68 68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
69 69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
70 70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
80 80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
81 81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
82 82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
83 83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
84 84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
85 85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
86 86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
87 87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
90 90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
91 91 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
92 92 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 93 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 94 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 95 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 96 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
97 97 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
98 98 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
99 99 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 100 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 101 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 102 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 103 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 104 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
105 105 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
106 106 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
107 107 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
108 108 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 109 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 110 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
111 111 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
112 112 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
113 113 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 114 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 115 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
116 116 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
117 mercurial/httppeer.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
118 mercurial/keepalive.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
117 mercurial/httppeer.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
118 mercurial/keepalive.py: error importing module: <ImportError> No module named 'thread' (line *) (glob)
119 119 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
120 120 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
121 121 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
122 122 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 123 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 124 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
125 125 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
126 126 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
127 127 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
128 128 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
129 129 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
130 130 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
131 131 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
132 132 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
133 133 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
134 134 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
135 135 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 136 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 137 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 138 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 139 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 140 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 141 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 142 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 143 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
144 mercurial/url.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
144 mercurial/url.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
145 145 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
146 146 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
147 147 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
148 148 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
149 149
150 150 #endif
General Comments 0
You need to be logged in to leave comments. Login now