pylint, pyflakes: remove unused or duplicate imports
Author: Nicolas Dumazet
Changeset: r10905:13a1b2fb (default branch)
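This changeset is the kind of cleanup pyflakes and pylint report: imports that are never referenced (the first hunk drops sys from check-code's "import sys, re, glob") and imports made redundant elsewhere (the second file drops sys, os, time, ListType, docutils, and utils). As a rough illustration of the analysis behind such warnings, and not part of the changeset, the sketch below walks a module's AST and reports top-level imports whose bound names are never used; real pyflakes additionally handles scopes, __all__, star imports, and redefinitions.

```python
# Hedged sketch (not from the changeset): a toy version of the unused-import
# check that pyflakes performs.  It only looks at module-level names and
# ignores scoping, so treat it as an illustration, not a real linter.
import ast
import sys

def unused_imports(source):
    tree = ast.parse(source)
    imported = {}          # bound name -> line where it was imported
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                imported[alias.asname or alias.name.split('.')[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                imported[alias.asname or alias.name] = node.lineno
    used = set(n.id for n in ast.walk(tree) if isinstance(n, ast.Name))
    return [(line, name) for name, line in imported.items() if name not in used]

if __name__ == '__main__':
    path = sys.argv[1]
    for line, name in sorted(unused_imports(open(path).read())):
        print("%s:%d: '%s' imported but unused" % (path, line, name))
```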
@@ -1,225 +1,225 @@
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # check-code - a style and portability checker for Mercurial
3 # check-code - a style and portability checker for Mercurial
4 #
4 #
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 import sys, re, glob
10 import re, glob
11 import optparse
11 import optparse
12
12
13 def repquote(m):
13 def repquote(m):
14 t = re.sub(r"\w", "x", m.group('text'))
14 t = re.sub(r"\w", "x", m.group('text'))
15 t = re.sub(r"[^\sx]", "o", t)
15 t = re.sub(r"[^\sx]", "o", t)
16 return m.group('quote') + t + m.group('quote')
16 return m.group('quote') + t + m.group('quote')
17
17
18 def reppython(m):
18 def reppython(m):
19 comment = m.group('comment')
19 comment = m.group('comment')
20 if comment:
20 if comment:
21 return "#" * len(comment)
21 return "#" * len(comment)
22 return repquote(m)
22 return repquote(m)
23
23
24 def repcomment(m):
24 def repcomment(m):
25 return m.group(1) + "#" * len(m.group(2))
25 return m.group(1) + "#" * len(m.group(2))
26
26
27 def repccomment(m):
27 def repccomment(m):
28 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
28 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
29 return m.group(1) + t + "*/"
29 return m.group(1) + t + "*/"
30
30
31 def repcallspaces(m):
31 def repcallspaces(m):
32 t = re.sub(r"\n\s+", "\n", m.group(2))
32 t = re.sub(r"\n\s+", "\n", m.group(2))
33 return m.group(1) + t
33 return m.group(1) + t
34
34
35 def repinclude(m):
35 def repinclude(m):
36 return m.group(1) + "<foo>"
36 return m.group(1) + "<foo>"
37
37
38 def rephere(m):
38 def rephere(m):
39 t = re.sub(r"\S", "x", m.group(2))
39 t = re.sub(r"\S", "x", m.group(2))
40 return m.group(1) + t
40 return m.group(1) + t
41
41
42
42
43 testpats = [
43 testpats = [
44 (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
44 (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
45 (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
45 (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
46 (r'^function', "don't use 'function', use old style"),
46 (r'^function', "don't use 'function', use old style"),
47 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
47 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
48 (r'echo.*\\n', "don't use 'echo \\n', use printf"),
48 (r'echo.*\\n', "don't use 'echo \\n', use printf"),
49 (r'^diff.*-\w*N', "don't use 'diff -N'"),
49 (r'^diff.*-\w*N', "don't use 'diff -N'"),
50 (r'(^| )wc[^|]*$', "filter wc output"),
50 (r'(^| )wc[^|]*$', "filter wc output"),
51 (r'head -c', "don't use 'head -c', use 'dd'"),
51 (r'head -c', "don't use 'head -c', use 'dd'"),
52 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
52 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
53 (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
53 (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
54 (r'printf.*\\x', "don't use printf \\x, use Python"),
54 (r'printf.*\\x', "don't use printf \\x, use Python"),
55 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
55 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
56 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
56 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
57 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
57 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
58 "use egrep for extended grep syntax"),
58 "use egrep for extended grep syntax"),
59 (r'/bin/', "don't use explicit paths for tools"),
59 (r'/bin/', "don't use explicit paths for tools"),
60 (r'\$PWD', "don't use $PWD, use `pwd`"),
60 (r'\$PWD', "don't use $PWD, use `pwd`"),
61 (r'[^\n]\Z', "no trailing newline"),
61 (r'[^\n]\Z', "no trailing newline"),
62 (r'export.*=', "don't export and assign at once"),
62 (r'export.*=', "don't export and assign at once"),
63 ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
63 ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
64 ]
64 ]
65
65
66 testfilters = [
66 testfilters = [
67 (r"( *)(#([^\n]*\S)?)", repcomment),
67 (r"( *)(#([^\n]*\S)?)", repcomment),
68 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
68 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
69 ]
69 ]
70
70
71 pypats = [
71 pypats = [
72 (r'^\s*\t', "don't use tabs"),
72 (r'^\s*\t', "don't use tabs"),
73 (r'\S;\s*\n', "semicolon"),
73 (r'\S;\s*\n', "semicolon"),
74 (r'\w,\w', "missing whitespace after ,"),
74 (r'\w,\w', "missing whitespace after ,"),
75 (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
75 (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
76 (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
76 (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
77 (r'.{85}', "line too long"),
77 (r'.{85}', "line too long"),
78 (r'[^\n]\Z', "no trailing newline"),
78 (r'[^\n]\Z', "no trailing newline"),
79 # (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
79 # (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
80 # (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
80 # (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
81 (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
81 (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
82 "linebreak after :"),
82 "linebreak after :"),
83 (r'class\s[^(]:', "old-style class, use class foo(object)"),
83 (r'class\s[^(]:', "old-style class, use class foo(object)"),
84 (r'^\s+del\(', "del isn't a function"),
84 (r'^\s+del\(', "del isn't a function"),
85 (r'^\s+except\(', "except isn't a function"),
85 (r'^\s+except\(', "except isn't a function"),
86 (r',]', "unneeded trailing ',' in list"),
86 (r',]', "unneeded trailing ',' in list"),
87 # (r'class\s[A-Z][^\(]*\((?!Exception)',
87 # (r'class\s[A-Z][^\(]*\((?!Exception)',
88 # "don't capitalize non-exception classes"),
88 # "don't capitalize non-exception classes"),
89 # (r'in range\(', "use xrange"),
89 # (r'in range\(', "use xrange"),
90 # (r'^\s*print\s+', "avoid using print in core and extensions"),
90 # (r'^\s*print\s+', "avoid using print in core and extensions"),
91 (r'[\x80-\xff]', "non-ASCII character literal"),
91 (r'[\x80-\xff]', "non-ASCII character literal"),
92 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
92 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
93 (r'^\s*with\s+', "with not available in Python 2.4"),
93 (r'^\s*with\s+', "with not available in Python 2.4"),
94 (r'^\s*(any|all)\(', "any/all not available in Python 2.4"),
94 (r'^\s*(any|all)\(', "any/all not available in Python 2.4"),
95 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
95 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
96 (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
96 (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
97 # (r'\s\s=', "gratuitous whitespace before ="),
97 # (r'\s\s=', "gratuitous whitespace before ="),
98 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
98 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
99 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s', "missing whitespace around operator"),
99 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s', "missing whitespace around operator"),
100 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
100 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
101 (r'[^+=*!<>&| -](\s=|=\s)[^= ]', "wrong whitespace around ="),
101 (r'[^+=*!<>&| -](\s=|=\s)[^= ]', "wrong whitespace around ="),
102 (r'raise Exception', "don't raise generic exceptions"),
102 (r'raise Exception', "don't raise generic exceptions"),
103 (r'ui\.(status|progress|write|note)\([\'\"]x',
103 (r'ui\.(status|progress|write|note)\([\'\"]x',
104 "warning: unwrapped ui message"),
104 "warning: unwrapped ui message"),
105 ]
105 ]
106
106
107 pyfilters = [
107 pyfilters = [
108 (r"""(?msx)(?P<comment>\#.*?$)|
108 (r"""(?msx)(?P<comment>\#.*?$)|
109 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
109 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
110 (?P<text>(([^\\]|\\.)*?))
110 (?P<text>(([^\\]|\\.)*?))
111 (?P=quote))""", reppython),
111 (?P=quote))""", reppython),
112 ]
112 ]
113
113
114 cpats = [
114 cpats = [
115 (r'//', "don't use //-style comments"),
115 (r'//', "don't use //-style comments"),
116 (r'^ ', "don't use spaces to indent"),
116 (r'^ ', "don't use spaces to indent"),
117 (r'\S\t', "don't use tabs except for indent"),
117 (r'\S\t', "don't use tabs except for indent"),
118 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
118 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
119 (r'.{85}', "line too long"),
119 (r'.{85}', "line too long"),
120 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
120 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
121 (r'return\(', "return is not a function"),
121 (r'return\(', "return is not a function"),
122 (r' ;', "no space before ;"),
122 (r' ;', "no space before ;"),
123 (r'\w+\* \w+', "use int *foo, not int* foo"),
123 (r'\w+\* \w+', "use int *foo, not int* foo"),
124 (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
124 (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
125 (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
125 (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
126 (r'\w,\w', "missing whitespace after ,"),
126 (r'\w,\w', "missing whitespace after ,"),
127 (r'\w[+/*]\w', "missing whitespace in expression"),
127 (r'\w[+/*]\w', "missing whitespace in expression"),
128 (r'^#\s+\w', "use #foo, not # foo"),
128 (r'^#\s+\w', "use #foo, not # foo"),
129 (r'[^\n]\Z', "no trailing newline"),
129 (r'[^\n]\Z', "no trailing newline"),
130 ]
130 ]
131
131
132 cfilters = [
132 cfilters = [
133 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
133 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
134 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
134 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
135 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
135 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
136 (r'(\()([^)]+\))', repcallspaces),
136 (r'(\()([^)]+\))', repcallspaces),
137 ]
137 ]
138
138
139 checks = [
139 checks = [
140 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
140 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
141 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
141 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
142 ('c', r'.*\.c$', cfilters, cpats),
142 ('c', r'.*\.c$', cfilters, cpats),
143 ]
143 ]
144
144
145 class norepeatlogger(object):
145 class norepeatlogger(object):
146 def __init__(self):
146 def __init__(self):
147 self._lastseen = None
147 self._lastseen = None
148
148
149 def log(self, fname, lineno, line, msg):
149 def log(self, fname, lineno, line, msg):
150 """print error related a to given line of a given file.
150 """print error related a to given line of a given file.
151
151
152 The faulty line will also be printed but only once in the case
152 The faulty line will also be printed but only once in the case
153 of multiple errors.
153 of multiple errors.
154
154
155 :fname: filename
155 :fname: filename
156 :lineno: line number
156 :lineno: line number
157 :line: actual content of the line
157 :line: actual content of the line
158 :msg: error message
158 :msg: error message
159 """
159 """
160 msgid = fname, lineno, line
160 msgid = fname, lineno, line
161 if msgid != self._lastseen:
161 if msgid != self._lastseen:
162 print "%s:%d:" % (fname, lineno)
162 print "%s:%d:" % (fname, lineno)
163 print " > %s" % line
163 print " > %s" % line
164 self._lastseen = msgid
164 self._lastseen = msgid
165 print " " + msg
165 print " " + msg
166
166
167 _defaultlogger = norepeatlogger()
167 _defaultlogger = norepeatlogger()
168
168
169 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False):
169 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False):
170 """checks style and portability of a given file
170 """checks style and portability of a given file
171
171
172 :f: filepath
172 :f: filepath
173 :logfunc: function used to report error
173 :logfunc: function used to report error
174 logfunc(filename, linenumber, linecontent, errormessage)
174 logfunc(filename, linenumber, linecontent, errormessage)
175 :maxerr: number of error to display before arborting.
175 :maxerr: number of error to display before arborting.
176 Set to None (default) to report all errors
176 Set to None (default) to report all errors
177
177
178 return True if no error is found, False otherwise.
178 return True if no error is found, False otherwise.
179 """
179 """
180 result = True
180 result = True
181 for name, match, filters, pats in checks:
181 for name, match, filters, pats in checks:
182 fc = 0
182 fc = 0
183 if not re.match(match, f):
183 if not re.match(match, f):
184 continue
184 continue
185 pre = post = open(f).read()
185 pre = post = open(f).read()
186 if "no-" + "check-code" in pre:
186 if "no-" + "check-code" in pre:
187 break
187 break
188 for p, r in filters:
188 for p, r in filters:
189 post = re.sub(p, r, post)
189 post = re.sub(p, r, post)
190 # print post # uncomment to show filtered version
190 # print post # uncomment to show filtered version
191 z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
191 z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
192 for n, l in z:
192 for n, l in z:
193 if "check-code" + "-ignore" in l[0]:
193 if "check-code" + "-ignore" in l[0]:
194 continue
194 continue
195 for p, msg in pats:
195 for p, msg in pats:
196 if not warnings and msg.startswith("warning"):
196 if not warnings and msg.startswith("warning"):
197 continue
197 continue
198 if re.search(p, l[1]):
198 if re.search(p, l[1]):
199 logfunc(f, n + 1, l[0], msg)
199 logfunc(f, n + 1, l[0], msg)
200 fc += 1
200 fc += 1
201 result = False
201 result = False
202 if maxerr is not None and fc >= maxerr:
202 if maxerr is not None and fc >= maxerr:
203 print " (too many errors, giving up)"
203 print " (too many errors, giving up)"
204 break
204 break
205 break
205 break
206 return result
206 return result
207
207
208
208
209 if __name__ == "__main__":
209 if __name__ == "__main__":
210 parser = optparse.OptionParser("%prog [options] [files]")
210 parser = optparse.OptionParser("%prog [options] [files]")
211 parser.add_option("-w", "--warnings", action="store_true",
211 parser.add_option("-w", "--warnings", action="store_true",
212 help="include warning-level checks")
212 help="include warning-level checks")
213 parser.add_option("-p", "--per-file", type="int",
213 parser.add_option("-p", "--per-file", type="int",
214 help="max warnings per file")
214 help="max warnings per file")
215
215
216 parser.set_defaults(per_file=15, warnings=False)
216 parser.set_defaults(per_file=15, warnings=False)
217 (options, args) = parser.parse_args()
217 (options, args) = parser.parse_args()
218
218
219 if len(args) == 0:
219 if len(args) == 0:
220 check = glob.glob("*")
220 check = glob.glob("*")
221 else:
221 else:
222 check = args
222 check = args
223
223
224 for f in check:
224 for f in check:
225 checkfile(f, maxerr=options.per_file, warnings=options.warnings)
225 checkfile(f, maxerr=options.per_file, warnings=options.warnings)
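That is the end of the check-code hunk. For context (not part of the changeset): the script is normally invoked from the command line with the options defined above (-w/--warnings, -p/--per-file), but checkfile() can also be driven programmatically through the logfunc hook whose signature the docstring spells out. A minimal sketch, assuming the script has been made importable under the hypothetical name check_code:

```python
# Hedged usage sketch, not part of the diff.  'check_code' is a hypothetical
# module name for the checker shown above; the real script name contains a
# hyphen, so it would have to be renamed or loaded explicitly before import.
import glob
import check_code  # hypothetical

problems = []

def collect(fname, lineno, line, msg):
    # signature matches the documented
    # logfunc(filename, linenumber, linecontent, errormessage) callback
    problems.append((fname, lineno, msg))

for path in glob.glob('*.py'):
    check_code.checkfile(path, logfunc=collect, warnings=True)

for fname, lineno, msg in sorted(problems):
    print("%s:%d: %s" % (fname, lineno, msg))
```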
@@ -1,1114 +1,1109 @@
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
2 # -*- coding: utf-8 -*-
3 # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
3 # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
4 # Author: Engelbert Gruber <grubert@users.sourceforge.net>
4 # Author: Engelbert Gruber <grubert@users.sourceforge.net>
5 # Copyright: This module is put into the public domain.
5 # Copyright: This module is put into the public domain.
6
6
7 """
7 """
8 Simple man page writer for reStructuredText.
8 Simple man page writer for reStructuredText.
9
9
10 Man pages (short for "manual pages") contain system documentation on unix-like
10 Man pages (short for "manual pages") contain system documentation on unix-like
11 systems. The pages are grouped in numbered sections:
11 systems. The pages are grouped in numbered sections:
12
12
13 1 executable programs and shell commands
13 1 executable programs and shell commands
14 2 system calls
14 2 system calls
15 3 library functions
15 3 library functions
16 4 special files
16 4 special files
17 5 file formats
17 5 file formats
18 6 games
18 6 games
19 7 miscellaneous
19 7 miscellaneous
20 8 system administration
20 8 system administration
21
21
22 Man pages are written *troff*, a text file formatting system.
22 Man pages are written *troff*, a text file formatting system.
23
23
24 See http://www.tldp.org/HOWTO/Man-Page for a start.
24 See http://www.tldp.org/HOWTO/Man-Page for a start.
25
25
26 Man pages have no subsection only parts.
26 Man pages have no subsection only parts.
27 Standard parts
27 Standard parts
28
28
29 NAME ,
29 NAME ,
30 SYNOPSIS ,
30 SYNOPSIS ,
31 DESCRIPTION ,
31 DESCRIPTION ,
32 OPTIONS ,
32 OPTIONS ,
33 FILES ,
33 FILES ,
34 SEE ALSO ,
34 SEE ALSO ,
35 BUGS ,
35 BUGS ,
36
36
37 and
37 and
38
38
39 AUTHOR .
39 AUTHOR .
40
40
41 A unix-like system keeps an index of the DESCRIPTIONs, which is accesable
41 A unix-like system keeps an index of the DESCRIPTIONs, which is accesable
42 by the command whatis or apropos.
42 by the command whatis or apropos.
43
43
44 """
44 """
45
45
46 __docformat__ = 'reStructuredText'
46 __docformat__ = 'reStructuredText'
47
47
48 import sys
49 import os
50 import time
51 import re
48 import re
52 from types import ListType
53
49
54 import docutils
50 from docutils import nodes, writers, languages
55 from docutils import nodes, utils, writers, languages
56 import roman
51 import roman
57
52
58 FIELD_LIST_INDENT = 7
53 FIELD_LIST_INDENT = 7
59 DEFINITION_LIST_INDENT = 7
54 DEFINITION_LIST_INDENT = 7
60 OPTION_LIST_INDENT = 7
55 OPTION_LIST_INDENT = 7
61 BLOCKQOUTE_INDENT = 3.5
56 BLOCKQOUTE_INDENT = 3.5
62
57
63 # Define two macros so man/roff can calculate the
58 # Define two macros so man/roff can calculate the
64 # indent/unindent margins by itself
59 # indent/unindent margins by itself
65 MACRO_DEF = (r""".
60 MACRO_DEF = (r""".
66 .nr rst2man-indent-level 0
61 .nr rst2man-indent-level 0
67 .
62 .
68 .de1 rstReportMargin
63 .de1 rstReportMargin
69 \\$1 \\n[an-margin]
64 \\$1 \\n[an-margin]
70 level \\n[rst2man-indent-level]
65 level \\n[rst2man-indent-level]
71 level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
66 level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
72 -
67 -
73 \\n[rst2man-indent0]
68 \\n[rst2man-indent0]
74 \\n[rst2man-indent1]
69 \\n[rst2man-indent1]
75 \\n[rst2man-indent2]
70 \\n[rst2man-indent2]
76 ..
71 ..
77 .de1 INDENT
72 .de1 INDENT
78 .\" .rstReportMargin pre:
73 .\" .rstReportMargin pre:
79 . RS \\$1
74 . RS \\$1
80 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
75 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
81 . nr rst2man-indent-level +1
76 . nr rst2man-indent-level +1
82 .\" .rstReportMargin post:
77 .\" .rstReportMargin post:
83 ..
78 ..
84 .de UNINDENT
79 .de UNINDENT
85 . RE
80 . RE
86 .\" indent \\n[an-margin]
81 .\" indent \\n[an-margin]
87 .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
82 .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
88 .nr rst2man-indent-level -1
83 .nr rst2man-indent-level -1
89 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
84 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
90 .in \\n[rst2man-indent\\n[rst2man-indent-level]]u
85 .in \\n[rst2man-indent\\n[rst2man-indent-level]]u
91 ..
86 ..
92 """)
87 """)
93
88
94 class Writer(writers.Writer):
89 class Writer(writers.Writer):
95
90
96 supported = ('manpage')
91 supported = ('manpage')
97 """Formats this writer supports."""
92 """Formats this writer supports."""
98
93
99 output = None
94 output = None
100 """Final translated form of `document`."""
95 """Final translated form of `document`."""
101
96
102 def __init__(self):
97 def __init__(self):
103 writers.Writer.__init__(self)
98 writers.Writer.__init__(self)
104 self.translator_class = Translator
99 self.translator_class = Translator
105
100
106 def translate(self):
101 def translate(self):
107 visitor = self.translator_class(self.document)
102 visitor = self.translator_class(self.document)
108 self.document.walkabout(visitor)
103 self.document.walkabout(visitor)
109 self.output = visitor.astext()
104 self.output = visitor.astext()
110
105
111
106
112 class Table:
107 class Table:
113 def __init__(self):
108 def __init__(self):
114 self._rows = []
109 self._rows = []
115 self._options = ['center']
110 self._options = ['center']
116 self._tab_char = '\t'
111 self._tab_char = '\t'
117 self._coldefs = []
112 self._coldefs = []
118 def new_row(self):
113 def new_row(self):
119 self._rows.append([])
114 self._rows.append([])
120 def append_separator(self, separator):
115 def append_separator(self, separator):
121 """Append the separator for table head."""
116 """Append the separator for table head."""
122 self._rows.append([separator])
117 self._rows.append([separator])
123 def append_cell(self, cell_lines):
118 def append_cell(self, cell_lines):
124 """cell_lines is an array of lines"""
119 """cell_lines is an array of lines"""
125 start = 0
120 start = 0
126 if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
121 if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
127 start = 1
122 start = 1
128 self._rows[-1].append(cell_lines[start:])
123 self._rows[-1].append(cell_lines[start:])
129 if len(self._coldefs) < len(self._rows[-1]):
124 if len(self._coldefs) < len(self._rows[-1]):
130 self._coldefs.append('l')
125 self._coldefs.append('l')
131 def _minimize_cell(self, cell_lines):
126 def _minimize_cell(self, cell_lines):
132 """Remove leading and trailing blank and ``.sp`` lines"""
127 """Remove leading and trailing blank and ``.sp`` lines"""
133 while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
128 while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
134 del cell_lines[0]
129 del cell_lines[0]
135 while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
130 while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
136 del cell_lines[-1]
131 del cell_lines[-1]
137 def as_list(self):
132 def as_list(self):
138 text = ['.TS\n']
133 text = ['.TS\n']
139 text.append(' '.join(self._options) + ';\n')
134 text.append(' '.join(self._options) + ';\n')
140 text.append('|%s|.\n' % ('|'.join(self._coldefs)))
135 text.append('|%s|.\n' % ('|'.join(self._coldefs)))
141 for row in self._rows:
136 for row in self._rows:
142 # row = array of cells. cell = array of lines.
137 # row = array of cells. cell = array of lines.
143 text.append('_\n') # line above
138 text.append('_\n') # line above
144 text.append('T{\n')
139 text.append('T{\n')
145 for i in range(len(row)):
140 for i in range(len(row)):
146 cell = row[i]
141 cell = row[i]
147 self._minimize_cell(cell)
142 self._minimize_cell(cell)
148 text.extend(cell)
143 text.extend(cell)
149 if not text[-1].endswith('\n'):
144 if not text[-1].endswith('\n'):
150 text[-1] += '\n'
145 text[-1] += '\n'
151 if i < len(row)-1:
146 if i < len(row)-1:
152 text.append('T}'+self._tab_char+'T{\n')
147 text.append('T}'+self._tab_char+'T{\n')
153 else:
148 else:
154 text.append('T}\n')
149 text.append('T}\n')
155 text.append('_\n')
150 text.append('_\n')
156 text.append('.TE\n')
151 text.append('.TE\n')
157 return text
152 return text
158
153
159 class Translator(nodes.NodeVisitor):
154 class Translator(nodes.NodeVisitor):
160 """"""
155 """"""
161
156
162 words_and_spaces = re.compile(r'\S+| +|\n')
157 words_and_spaces = re.compile(r'\S+| +|\n')
163 document_start = """Man page generated from reStructeredText."""
158 document_start = """Man page generated from reStructeredText."""
164
159
165 def __init__(self, document):
160 def __init__(self, document):
166 nodes.NodeVisitor.__init__(self, document)
161 nodes.NodeVisitor.__init__(self, document)
167 self.settings = settings = document.settings
162 self.settings = settings = document.settings
168 lcode = settings.language_code
163 lcode = settings.language_code
169 self.language = languages.get_language(lcode)
164 self.language = languages.get_language(lcode)
170 self.head = []
165 self.head = []
171 self.body = []
166 self.body = []
172 self.foot = []
167 self.foot = []
173 self.section_level = 0
168 self.section_level = 0
174 self.context = []
169 self.context = []
175 self.topic_class = ''
170 self.topic_class = ''
176 self.colspecs = []
171 self.colspecs = []
177 self.compact_p = 1
172 self.compact_p = 1
178 self.compact_simple = None
173 self.compact_simple = None
179 # the list style "*" bullet or "#" numbered
174 # the list style "*" bullet or "#" numbered
180 self._list_char = []
175 self._list_char = []
181 # writing the header .TH and .SH NAME is postboned after
176 # writing the header .TH and .SH NAME is postboned after
182 # docinfo.
177 # docinfo.
183 self._docinfo = {
178 self._docinfo = {
184 "title" : "", "title_upper": "",
179 "title" : "", "title_upper": "",
185 "subtitle" : "",
180 "subtitle" : "",
186 "manual_section" : "", "manual_group" : "",
181 "manual_section" : "", "manual_group" : "",
187 "author" : [],
182 "author" : [],
188 "date" : "",
183 "date" : "",
189 "copyright" : "",
184 "copyright" : "",
190 "version" : "",
185 "version" : "",
191 }
186 }
192 self._docinfo_keys = [] # a list to keep the sequence as in source.
187 self._docinfo_keys = [] # a list to keep the sequence as in source.
193 self._docinfo_names = {} # to get name from text not normalized.
188 self._docinfo_names = {} # to get name from text not normalized.
194 self._in_docinfo = None
189 self._in_docinfo = None
195 self._active_table = None
190 self._active_table = None
196 self._in_literal = False
191 self._in_literal = False
197 self.header_written = 0
192 self.header_written = 0
198 self._line_block = 0
193 self._line_block = 0
199 self.authors = []
194 self.authors = []
200 self.section_level = 0
195 self.section_level = 0
201 self._indent = [0]
196 self._indent = [0]
202 # central definition of simple processing rules
197 # central definition of simple processing rules
203 # what to output on : visit, depart
198 # what to output on : visit, depart
204 # Do not use paragraph requests ``.PP`` because these set indentation.
199 # Do not use paragraph requests ``.PP`` because these set indentation.
205 # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
200 # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
206 #
201 #
207 # Fonts are put on a stack, the top one is used.
202 # Fonts are put on a stack, the top one is used.
208 # ``.ft P`` or ``\\fP`` pop from stack.
203 # ``.ft P`` or ``\\fP`` pop from stack.
209 # ``B`` bold, ``I`` italic, ``R`` roman should be available.
204 # ``B`` bold, ``I`` italic, ``R`` roman should be available.
210 # Hopefully ``C`` courier too.
205 # Hopefully ``C`` courier too.
211 self.defs = {
206 self.defs = {
212 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
207 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
213 'definition_list_item' : ('.TP', ''),
208 'definition_list_item' : ('.TP', ''),
214 'field_name' : ('.TP\n.B ', '\n'),
209 'field_name' : ('.TP\n.B ', '\n'),
215 'literal' : ('\\fB', '\\fP'),
210 'literal' : ('\\fB', '\\fP'),
216 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
211 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
217
212
218 'option_list_item' : ('.TP\n', ''),
213 'option_list_item' : ('.TP\n', ''),
219
214
220 'reference' : (r'\%', r'\:'),
215 'reference' : (r'\%', r'\:'),
221 'emphasis': ('\\fI', '\\fP'),
216 'emphasis': ('\\fI', '\\fP'),
222 'strong' : ('\\fB', '\\fP'),
217 'strong' : ('\\fB', '\\fP'),
223 'term' : ('\n.B ', '\n'),
218 'term' : ('\n.B ', '\n'),
224 'title_reference' : ('\\fI', '\\fP'),
219 'title_reference' : ('\\fI', '\\fP'),
225
220
226 'topic-title' : ('.SS ',),
221 'topic-title' : ('.SS ',),
227 'sidebar-title' : ('.SS ',),
222 'sidebar-title' : ('.SS ',),
228
223
229 'problematic' : ('\n.nf\n', '\n.fi\n'),
224 'problematic' : ('\n.nf\n', '\n.fi\n'),
230 }
225 }
231 # NOTE don't specify the newline before a dot-command, but ensure
226 # NOTE don't specify the newline before a dot-command, but ensure
232 # it is there.
227 # it is there.
233
228
234 def comment_begin(self, text):
229 def comment_begin(self, text):
235 """Return commented version of the passed text WITHOUT end of
230 """Return commented version of the passed text WITHOUT end of
236 line/comment."""
231 line/comment."""
237 prefix = '.\\" '
232 prefix = '.\\" '
238 out_text = ''.join(
233 out_text = ''.join(
239 [(prefix + in_line + '\n')
234 [(prefix + in_line + '\n')
240 for in_line in text.split('\n')])
235 for in_line in text.split('\n')])
241 return out_text
236 return out_text
242
237
243 def comment(self, text):
238 def comment(self, text):
244 """Return commented version of the passed text."""
239 """Return commented version of the passed text."""
245 return self.comment_begin(text)+'.\n'
240 return self.comment_begin(text)+'.\n'
246
241
247 def ensure_eol(self):
242 def ensure_eol(self):
248 """Ensure the last line in body is terminated by new line."""
243 """Ensure the last line in body is terminated by new line."""
249 if self.body[-1][-1] != '\n':
244 if self.body[-1][-1] != '\n':
250 self.body.append('\n')
245 self.body.append('\n')
251
246
252 def astext(self):
247 def astext(self):
253 """Return the final formatted document as a string."""
248 """Return the final formatted document as a string."""
254 if not self.header_written:
249 if not self.header_written:
255 # ensure we get a ".TH" as viewers require it.
250 # ensure we get a ".TH" as viewers require it.
256 self.head.append(self.header())
251 self.head.append(self.header())
257 # filter body
252 # filter body
258 for i in xrange(len(self.body)-1, 0, -1):
253 for i in xrange(len(self.body)-1, 0, -1):
259 # remove superfluous vertical gaps.
254 # remove superfluous vertical gaps.
260 if self.body[i] == '.sp\n':
255 if self.body[i] == '.sp\n':
261 if self.body[i - 1][:4] in ('.BI ','.IP '):
256 if self.body[i - 1][:4] in ('.BI ','.IP '):
262 self.body[i] = '.\n'
257 self.body[i] = '.\n'
263 elif (self.body[i - 1][:3] == '.B ' and
258 elif (self.body[i - 1][:3] == '.B ' and
264 self.body[i - 2][:4] == '.TP\n'):
259 self.body[i - 2][:4] == '.TP\n'):
265 self.body[i] = '.\n'
260 self.body[i] = '.\n'
266 elif (self.body[i - 1] == '\n' and
261 elif (self.body[i - 1] == '\n' and
267 self.body[i - 2][0] != '.' and
262 self.body[i - 2][0] != '.' and
268 (self.body[i - 3][:7] == '.TP\n.B '
263 (self.body[i - 3][:7] == '.TP\n.B '
269 or self.body[i - 3][:4] == '\n.B ')
264 or self.body[i - 3][:4] == '\n.B ')
270 ):
265 ):
271 self.body[i] = '.\n'
266 self.body[i] = '.\n'
272 return ''.join(self.head + self.body + self.foot)
267 return ''.join(self.head + self.body + self.foot)
273
268
274 def deunicode(self, text):
269 def deunicode(self, text):
275 text = text.replace(u'\xa0', '\\ ')
270 text = text.replace(u'\xa0', '\\ ')
276 text = text.replace(u'\u2020', '\\(dg')
271 text = text.replace(u'\u2020', '\\(dg')
277 return text
272 return text
278
273
279 def visit_Text(self, node):
274 def visit_Text(self, node):
280 text = node.astext()
275 text = node.astext()
281 text = text.replace('\\','\\e')
276 text = text.replace('\\','\\e')
282 replace_pairs = [
277 replace_pairs = [
283 (u'-', ur'\-'),
278 (u'-', ur'\-'),
284 (u'\'', ur'\(aq'),
279 (u'\'', ur'\(aq'),
285 (u'´', ur'\''),
280 (u'´', ur'\''),
286 (u'`', ur'\(ga'),
281 (u'`', ur'\(ga'),
287 ]
282 ]
288 for (in_char, out_markup) in replace_pairs:
283 for (in_char, out_markup) in replace_pairs:
289 text = text.replace(in_char, out_markup)
284 text = text.replace(in_char, out_markup)
290 # unicode
285 # unicode
291 text = self.deunicode(text)
286 text = self.deunicode(text)
292 if self._in_literal:
287 if self._in_literal:
293 # prevent interpretation of "." at line start
288 # prevent interpretation of "." at line start
294 if text[0] == '.':
289 if text[0] == '.':
295 text = '\\&' + text
290 text = '\\&' + text
296 text = text.replace('\n.', '\n\\&.')
291 text = text.replace('\n.', '\n\\&.')
297 self.body.append(text)
292 self.body.append(text)
298
293
299 def depart_Text(self, node):
294 def depart_Text(self, node):
300 pass
295 pass
301
296
302 def list_start(self, node):
297 def list_start(self, node):
303 class enum_char:
298 class enum_char:
304 enum_style = {
299 enum_style = {
305 'bullet' : '\\(bu',
300 'bullet' : '\\(bu',
306 'emdash' : '\\(em',
301 'emdash' : '\\(em',
307 }
302 }
308
303
309 def __init__(self, style):
304 def __init__(self, style):
310 self._style = style
305 self._style = style
311 if node.has_key('start'):
306 if node.has_key('start'):
312 self._cnt = node['start'] - 1
307 self._cnt = node['start'] - 1
313 else:
308 else:
314 self._cnt = 0
309 self._cnt = 0
315 self._indent = 2
310 self._indent = 2
316 if style == 'arabic':
311 if style == 'arabic':
317 # indentation depends on number of childrens
312 # indentation depends on number of childrens
318 # and start value.
313 # and start value.
319 self._indent = len(str(len(node.children)))
314 self._indent = len(str(len(node.children)))
320 self._indent += len(str(self._cnt)) + 1
315 self._indent += len(str(self._cnt)) + 1
321 elif style == 'loweralpha':
316 elif style == 'loweralpha':
322 self._cnt += ord('a') - 1
317 self._cnt += ord('a') - 1
323 self._indent = 3
318 self._indent = 3
324 elif style == 'upperalpha':
319 elif style == 'upperalpha':
325 self._cnt += ord('A') - 1
320 self._cnt += ord('A') - 1
326 self._indent = 3
321 self._indent = 3
327 elif style.endswith('roman'):
322 elif style.endswith('roman'):
328 self._indent = 5
323 self._indent = 5
329
324
330 def next(self):
325 def next(self):
331 if self._style == 'bullet':
326 if self._style == 'bullet':
332 return self.enum_style[self._style]
327 return self.enum_style[self._style]
333 elif self._style == 'emdash':
328 elif self._style == 'emdash':
334 return self.enum_style[self._style]
329 return self.enum_style[self._style]
335 self._cnt += 1
330 self._cnt += 1
336 # TODO add prefix postfix
331 # TODO add prefix postfix
337 if self._style == 'arabic':
332 if self._style == 'arabic':
338 return "%d." % self._cnt
333 return "%d." % self._cnt
339 elif self._style in ('loweralpha', 'upperalpha'):
334 elif self._style in ('loweralpha', 'upperalpha'):
340 return "%c." % self._cnt
335 return "%c." % self._cnt
341 elif self._style.endswith('roman'):
336 elif self._style.endswith('roman'):
342 res = roman.toRoman(self._cnt) + '.'
337 res = roman.toRoman(self._cnt) + '.'
343 if self._style.startswith('upper'):
338 if self._style.startswith('upper'):
344 return res.upper()
339 return res.upper()
345 return res.lower()
340 return res.lower()
346 else:
341 else:
347 return "%d." % self._cnt
342 return "%d." % self._cnt
348 def get_width(self):
343 def get_width(self):
349 return self._indent
344 return self._indent
350 def __repr__(self):
345 def __repr__(self):
351 return 'enum_style-%s' % list(self._style)
346 return 'enum_style-%s' % list(self._style)
352
347
353 if node.has_key('enumtype'):
348 if node.has_key('enumtype'):
354 self._list_char.append(enum_char(node['enumtype']))
349 self._list_char.append(enum_char(node['enumtype']))
355 else:
350 else:
356 self._list_char.append(enum_char('bullet'))
351 self._list_char.append(enum_char('bullet'))
357 if len(self._list_char) > 1:
352 if len(self._list_char) > 1:
358 # indent nested lists
353 # indent nested lists
359 self.indent(self._list_char[-2].get_width())
354 self.indent(self._list_char[-2].get_width())
360 else:
355 else:
361 self.indent(self._list_char[-1].get_width())
356 self.indent(self._list_char[-1].get_width())
362
357
363 def list_end(self):
358 def list_end(self):
364 self.dedent()
359 self.dedent()
365 self._list_char.pop()
360 self._list_char.pop()
366
361
367 def header(self):
362 def header(self):
368 tmpl = (".TH %(title_upper)s %(manual_section)s"
363 tmpl = (".TH %(title_upper)s %(manual_section)s"
369 " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
364 " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
370 ".SH NAME\n"
365 ".SH NAME\n"
371 "%(title)s \- %(subtitle)s\n")
366 "%(title)s \- %(subtitle)s\n")
372 return tmpl % self._docinfo
367 return tmpl % self._docinfo
373
368
374 def append_header(self):
369 def append_header(self):
375 """append header with .TH and .SH NAME"""
370 """append header with .TH and .SH NAME"""
376 # NOTE before everything
371 # NOTE before everything
377 # .TH title_upper section date source manual
372 # .TH title_upper section date source manual
378 if self.header_written:
373 if self.header_written:
379 return
374 return
380 self.body.append(self.header())
375 self.body.append(self.header())
381 self.body.append(MACRO_DEF)
376 self.body.append(MACRO_DEF)
382 self.header_written = 1
377 self.header_written = 1
383
378
384 def visit_address(self, node):
379 def visit_address(self, node):
385 self.visit_docinfo_item(node, 'address')
380 self.visit_docinfo_item(node, 'address')
386
381
387 def depart_address(self, node):
382 def depart_address(self, node):
388 pass
383 pass
389
384
390 def visit_admonition(self, node, name=None):
385 def visit_admonition(self, node, name=None):
391 if name:
386 if name:
392 self.body.append('.IP %s\n' %
387 self.body.append('.IP %s\n' %
393 self.language.labels.get(name, name))
388 self.language.labels.get(name, name))
394
389
395 def depart_admonition(self, node):
390 def depart_admonition(self, node):
396 self.body.append('.RE\n')
391 self.body.append('.RE\n')
397
392
398 def visit_attention(self, node):
393 def visit_attention(self, node):
399 self.visit_admonition(node, 'attention')
394 self.visit_admonition(node, 'attention')
400
395
401 depart_attention = depart_admonition
396 depart_attention = depart_admonition
402
397
403 def visit_docinfo_item(self, node, name):
398 def visit_docinfo_item(self, node, name):
404 if name == 'author':
399 if name == 'author':
405 self._docinfo[name].append(node.astext())
400 self._docinfo[name].append(node.astext())
406 else:
401 else:
407 self._docinfo[name] = node.astext()
402 self._docinfo[name] = node.astext()
408 self._docinfo_keys.append(name)
403 self._docinfo_keys.append(name)
409 raise nodes.SkipNode
404 raise nodes.SkipNode
410
405
411 def depart_docinfo_item(self, node):
406 def depart_docinfo_item(self, node):
412 pass
407 pass
413
408
414 def visit_author(self, node):
409 def visit_author(self, node):
415 self.visit_docinfo_item(node, 'author')
410 self.visit_docinfo_item(node, 'author')
416
411
417 depart_author = depart_docinfo_item
412 depart_author = depart_docinfo_item
418
413
419 def visit_authors(self, node):
414 def visit_authors(self, node):
420 # _author is called anyway.
415 # _author is called anyway.
421 pass
416 pass
422
417
423 def depart_authors(self, node):
418 def depart_authors(self, node):
424 pass
419 pass
425
420
426 def visit_block_quote(self, node):
421 def visit_block_quote(self, node):
427 # BUG/HACK: indent alway uses the _last_ indention,
422 # BUG/HACK: indent alway uses the _last_ indention,
428 # thus we need two of them.
423 # thus we need two of them.
429 self.indent(BLOCKQOUTE_INDENT)
424 self.indent(BLOCKQOUTE_INDENT)
430 self.indent(0)
425 self.indent(0)
431
426
432 def depart_block_quote(self, node):
427 def depart_block_quote(self, node):
433 self.dedent()
428 self.dedent()
434 self.dedent()
429 self.dedent()
435
430
436 def visit_bullet_list(self, node):
431 def visit_bullet_list(self, node):
437 self.list_start(node)
432 self.list_start(node)
438
433
439 def depart_bullet_list(self, node):
434 def depart_bullet_list(self, node):
440 self.list_end()
435 self.list_end()
441
436
442 def visit_caption(self, node):
437 def visit_caption(self, node):
443 pass
438 pass
444
439
445 def depart_caption(self, node):
440 def depart_caption(self, node):
446 pass
441 pass
447
442
448 def visit_caution(self, node):
443 def visit_caution(self, node):
449 self.visit_admonition(node, 'caution')
444 self.visit_admonition(node, 'caution')
450
445
451 depart_caution = depart_admonition
446 depart_caution = depart_admonition
452
447
453 def visit_citation(self, node):
448 def visit_citation(self, node):
454 num, text = node.astext().split(None, 1)
449 num, text = node.astext().split(None, 1)
455 num = num.strip()
450 num = num.strip()
456 self.body.append('.IP [%s] 5\n' % num)
451 self.body.append('.IP [%s] 5\n' % num)
457
452
458 def depart_citation(self, node):
453 def depart_citation(self, node):
459 pass
454 pass
460
455
461 def visit_citation_reference(self, node):
456 def visit_citation_reference(self, node):
462 self.body.append('['+node.astext()+']')
457 self.body.append('['+node.astext()+']')
463 raise nodes.SkipNode
458 raise nodes.SkipNode
464
459
465 def visit_classifier(self, node):
460 def visit_classifier(self, node):
466 pass
461 pass
467
462
468 def depart_classifier(self, node):
463 def depart_classifier(self, node):
469 pass
464 pass
470
465
471 def visit_colspec(self, node):
466 def visit_colspec(self, node):
472 self.colspecs.append(node)
467 self.colspecs.append(node)
473
468
474 def depart_colspec(self, node):
469 def depart_colspec(self, node):
475 pass
470 pass
476
471
477 def write_colspecs(self):
472 def write_colspecs(self):
478 self.body.append("%s.\n" % ('L '*len(self.colspecs)))
473 self.body.append("%s.\n" % ('L '*len(self.colspecs)))
479
474
480 def visit_comment(self, node,
475 def visit_comment(self, node,
481 sub=re.compile('-(?=-)').sub):
476 sub=re.compile('-(?=-)').sub):
482 self.body.append(self.comment(node.astext()))
477 self.body.append(self.comment(node.astext()))
483 raise nodes.SkipNode
478 raise nodes.SkipNode
484
479
485 def visit_contact(self, node):
480 def visit_contact(self, node):
486 self.visit_docinfo_item(node, 'contact')
481 self.visit_docinfo_item(node, 'contact')
487
482
488 depart_contact = depart_docinfo_item
483 depart_contact = depart_docinfo_item
489
484
490 def visit_container(self, node):
485 def visit_container(self, node):
491 pass
486 pass
492
487
493 def depart_container(self, node):
488 def depart_container(self, node):
494 pass
489 pass
495
490
496 def visit_compound(self, node):
491 def visit_compound(self, node):
497 pass
492 pass
498
493
499 def depart_compound(self, node):
494 def depart_compound(self, node):
500 pass
495 pass
501
496
502 def visit_copyright(self, node):
497 def visit_copyright(self, node):
503 self.visit_docinfo_item(node, 'copyright')
498 self.visit_docinfo_item(node, 'copyright')
504
499
505 def visit_danger(self, node):
500 def visit_danger(self, node):
506 self.visit_admonition(node, 'danger')
501 self.visit_admonition(node, 'danger')
507
502
508 depart_danger = depart_admonition
503 depart_danger = depart_admonition
509
504
510 def visit_date(self, node):
505 def visit_date(self, node):
511 self.visit_docinfo_item(node, 'date')
506 self.visit_docinfo_item(node, 'date')
512
507
513 def visit_decoration(self, node):
508 def visit_decoration(self, node):
514 pass
509 pass
515
510
516 def depart_decoration(self, node):
511 def depart_decoration(self, node):
517 pass
512 pass
518
513
519 def visit_definition(self, node):
514 def visit_definition(self, node):
520 pass
515 pass
521
516
522 def depart_definition(self, node):
517 def depart_definition(self, node):
523 pass
518 pass
524
519
525 def visit_definition_list(self, node):
520 def visit_definition_list(self, node):
526 self.indent(DEFINITION_LIST_INDENT)
521 self.indent(DEFINITION_LIST_INDENT)
527
522
528 def depart_definition_list(self, node):
523 def depart_definition_list(self, node):
529 self.dedent()
524 self.dedent()
530
525
531 def visit_definition_list_item(self, node):
526 def visit_definition_list_item(self, node):
532 self.body.append(self.defs['definition_list_item'][0])
527 self.body.append(self.defs['definition_list_item'][0])
533
528
534 def depart_definition_list_item(self, node):
529 def depart_definition_list_item(self, node):
535 self.body.append(self.defs['definition_list_item'][1])
530 self.body.append(self.defs['definition_list_item'][1])
536
531
537 def visit_description(self, node):
532 def visit_description(self, node):
538 pass
533 pass
539
534
540 def depart_description(self, node):
535 def depart_description(self, node):
541 pass
536 pass
542
537
543 def visit_docinfo(self, node):
538 def visit_docinfo(self, node):
544 self._in_docinfo = 1
539 self._in_docinfo = 1
545
540
546 def depart_docinfo(self, node):
541 def depart_docinfo(self, node):
547 self._in_docinfo = None
542 self._in_docinfo = None
548 # NOTE nothing should be written before this
543 # NOTE nothing should be written before this
549 self.append_header()
544 self.append_header()
550
545
551 def visit_doctest_block(self, node):
546 def visit_doctest_block(self, node):
552 self.body.append(self.defs['literal_block'][0])
547 self.body.append(self.defs['literal_block'][0])
553 self._in_literal = True
548 self._in_literal = True
554
549
555 def depart_doctest_block(self, node):
550 def depart_doctest_block(self, node):
556 self._in_literal = False
551 self._in_literal = False
557 self.body.append(self.defs['literal_block'][1])
552 self.body.append(self.defs['literal_block'][1])
558
553
559 def visit_document(self, node):
554 def visit_document(self, node):
560 # no blank line between comment and header.
555 # no blank line between comment and header.
561 self.body.append(self.comment(self.document_start).rstrip()+'\n')
556 self.body.append(self.comment(self.document_start).rstrip()+'\n')
562 # writing header is postboned
557 # writing header is postboned
563 self.header_written = 0
558 self.header_written = 0
564
559
565 def depart_document(self, node):
560 def depart_document(self, node):
566 if self._docinfo['author']:
561 if self._docinfo['author']:
567 self.body.append('.SH AUTHOR\n%s\n'
562 self.body.append('.SH AUTHOR\n%s\n'
568 % ', '.join(self._docinfo['author']))
563 % ', '.join(self._docinfo['author']))
569 skip = ('author', 'copyright', 'date',
564 skip = ('author', 'copyright', 'date',
570 'manual_group', 'manual_section',
565 'manual_group', 'manual_section',
571 'subtitle',
566 'subtitle',
572 'title', 'title_upper', 'version')
567 'title', 'title_upper', 'version')
573 for name in self._docinfo_keys:
568 for name in self._docinfo_keys:
574 if name == 'address':
569 if name == 'address':
575 self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
570 self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
576 self.language.labels.get(name, name),
571 self.language.labels.get(name, name),
577 self.defs['indent'][0] % 0,
572 self.defs['indent'][0] % 0,
578 self.defs['indent'][0] % BLOCKQOUTE_INDENT,
573 self.defs['indent'][0] % BLOCKQOUTE_INDENT,
579 self._docinfo[name],
574 self._docinfo[name],
580 self.defs['indent'][1],
575 self.defs['indent'][1],
581 self.defs['indent'][1]))
576 self.defs['indent'][1]))
582 elif not name in skip:
577 elif not name in skip:
583 if name in self._docinfo_names:
578 if name in self._docinfo_names:
584 label = self._docinfo_names[name]
579 label = self._docinfo_names[name]
585 else:
580 else:
586 label = self.language.labels.get(name, name)
581 label = self.language.labels.get(name, name)
587 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
582 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
588 if self._docinfo['copyright']:
583 if self._docinfo['copyright']:
589 self.body.append('.SH COPYRIGHT\n%s\n'
584 self.body.append('.SH COPYRIGHT\n%s\n'
590 % self._docinfo['copyright'])
585 % self._docinfo['copyright'])
591 self.body.append(self.comment(
586 self.body.append(self.comment(
592 'Generated by docutils manpage writer.\n'))
587 'Generated by docutils manpage writer.\n'))
593
588
594 def visit_emphasis(self, node):
589 def visit_emphasis(self, node):
595 self.body.append(self.defs['emphasis'][0])
590 self.body.append(self.defs['emphasis'][0])
596
591
597 def depart_emphasis(self, node):
592 def depart_emphasis(self, node):
598 self.body.append(self.defs['emphasis'][1])
593 self.body.append(self.defs['emphasis'][1])
599
594
600 def visit_entry(self, node):
595 def visit_entry(self, node):
601 # a cell in a table row
596 # a cell in a table row
602 if 'morerows' in node:
597 if 'morerows' in node:
603 self.document.reporter.warning('"table row spanning" not supported',
598 self.document.reporter.warning('"table row spanning" not supported',
604 base_node=node)
599 base_node=node)
605 if 'morecols' in node:
600 if 'morecols' in node:
606 self.document.reporter.warning(
601 self.document.reporter.warning(
607 '"table cell spanning" not supported', base_node=node)
602 '"table cell spanning" not supported', base_node=node)
608 self.context.append(len(self.body))
603 self.context.append(len(self.body))
609
604
610 def depart_entry(self, node):
605 def depart_entry(self, node):
611 start = self.context.pop()
606 start = self.context.pop()
612 self._active_table.append_cell(self.body[start:])
607 self._active_table.append_cell(self.body[start:])
613 del self.body[start:]
608 del self.body[start:]
614
609
615 def visit_enumerated_list(self, node):
610 def visit_enumerated_list(self, node):
616 self.list_start(node)
611 self.list_start(node)
617
612
618 def depart_enumerated_list(self, node):
613 def depart_enumerated_list(self, node):
619 self.list_end()
614 self.list_end()
620
615
621 def visit_error(self, node):
616 def visit_error(self, node):
622 self.visit_admonition(node, 'error')
617 self.visit_admonition(node, 'error')
623
618
624 depart_error = depart_admonition
619 depart_error = depart_admonition
625
620
626 def visit_field(self, node):
621 def visit_field(self, node):
627 pass
622 pass
628
623
629 def depart_field(self, node):
624 def depart_field(self, node):
630 pass
625 pass
631
626
632 def visit_field_body(self, node):
627 def visit_field_body(self, node):
633 if self._in_docinfo:
628 if self._in_docinfo:
634 name_normalized = self._field_name.lower().replace(" ","_")
629 name_normalized = self._field_name.lower().replace(" ","_")
635 self._docinfo_names[name_normalized] = self._field_name
630 self._docinfo_names[name_normalized] = self._field_name
636 self.visit_docinfo_item(node, name_normalized)
631 self.visit_docinfo_item(node, name_normalized)
637 raise nodes.SkipNode
632 raise nodes.SkipNode
638
633
639 def depart_field_body(self, node):
634 def depart_field_body(self, node):
640 pass
635 pass
641
636
642 def visit_field_list(self, node):
637 def visit_field_list(self, node):
643 self.indent(FIELD_LIST_INDENT)
638 self.indent(FIELD_LIST_INDENT)
644
639
645 def depart_field_list(self, node):
640 def depart_field_list(self, node):
646 self.dedent()
641 self.dedent()
647
642
648 def visit_field_name(self, node):
643 def visit_field_name(self, node):
649 if self._in_docinfo:
644 if self._in_docinfo:
650 self._field_name = node.astext()
645 self._field_name = node.astext()
651 raise nodes.SkipNode
646 raise nodes.SkipNode
652 else:
647 else:
653 self.body.append(self.defs['field_name'][0])
648 self.body.append(self.defs['field_name'][0])
654
649
655 def depart_field_name(self, node):
650 def depart_field_name(self, node):
656 self.body.append(self.defs['field_name'][1])
651 self.body.append(self.defs['field_name'][1])
657
652
658 def visit_figure(self, node):
653 def visit_figure(self, node):
659 self.indent(2.5)
654 self.indent(2.5)
660 self.indent(0)
655 self.indent(0)
661
656
662 def depart_figure(self, node):
657 def depart_figure(self, node):
663 self.dedent()
658 self.dedent()
664 self.dedent()
659 self.dedent()
665
660
666 def visit_footer(self, node):
661 def visit_footer(self, node):
667 self.document.reporter.warning('"footer" not supported',
662 self.document.reporter.warning('"footer" not supported',
668 base_node=node)
663 base_node=node)
669
664
670 def depart_footer(self, node):
665 def depart_footer(self, node):
671 pass
666 pass
672
667
673 def visit_footnote(self, node):
668 def visit_footnote(self, node):
674 num, text = node.astext().split(None, 1)
669 num, text = node.astext().split(None, 1)
675 num = num.strip()
670 num = num.strip()
676 self.body.append('.IP [%s] 5\n' % self.deunicode(num))
671 self.body.append('.IP [%s] 5\n' % self.deunicode(num))
677
672
678 def depart_footnote(self, node):
673 def depart_footnote(self, node):
679 pass
674 pass
680
675
681 def footnote_backrefs(self, node):
676 def footnote_backrefs(self, node):
682 self.document.reporter.warning('"footnote_backrefs" not supported',
677 self.document.reporter.warning('"footnote_backrefs" not supported',
683 base_node=node)
678 base_node=node)
684
679
685 def visit_footnote_reference(self, node):
680 def visit_footnote_reference(self, node):
686 self.body.append('['+self.deunicode(node.astext())+']')
681 self.body.append('['+self.deunicode(node.astext())+']')
687 raise nodes.SkipNode
682 raise nodes.SkipNode
688
683
689 def depart_footnote_reference(self, node):
684 def depart_footnote_reference(self, node):
690 pass
685 pass
691
686
692 def visit_generated(self, node):
687 def visit_generated(self, node):
693 pass
688 pass
694
689
695 def depart_generated(self, node):
690 def depart_generated(self, node):
696 pass
691 pass
697
692
698 def visit_header(self, node):
693 def visit_header(self, node):
699 raise NotImplementedError, node.astext()
694 raise NotImplementedError, node.astext()
700
695
701 def depart_header(self, node):
696 def depart_header(self, node):
702 pass
697 pass
703
698
704 def visit_hint(self, node):
699 def visit_hint(self, node):
705 self.visit_admonition(node, 'hint')
700 self.visit_admonition(node, 'hint')
706
701
707 depart_hint = depart_admonition
702 depart_hint = depart_admonition
708
703
709 def visit_subscript(self, node):
704 def visit_subscript(self, node):
710 self.body.append('\\s-2\\d')
705 self.body.append('\\s-2\\d')
711
706
712 def depart_subscript(self, node):
707 def depart_subscript(self, node):
713 self.body.append('\\u\\s0')
708 self.body.append('\\u\\s0')
714
709
715 def visit_superscript(self, node):
710 def visit_superscript(self, node):
716 self.body.append('\\s-2\\u')
711 self.body.append('\\s-2\\u')
717
712
718 def depart_superscript(self, node):
713 def depart_superscript(self, node):
719 self.body.append('\\d\\s0')
714 self.body.append('\\d\\s0')
720
715
721 def visit_attribution(self, node):
716 def visit_attribution(self, node):
722 self.body.append('\\(em ')
717 self.body.append('\\(em ')
723
718
724 def depart_attribution(self, node):
719 def depart_attribution(self, node):
725 self.body.append('\n')
720 self.body.append('\n')
726
721
727 def visit_image(self, node):
722 def visit_image(self, node):
728 self.document.reporter.warning('"image" not supported',
723 self.document.reporter.warning('"image" not supported',
729 base_node=node)
724 base_node=node)
730 text = []
725 text = []
731 if 'alt' in node.attributes:
726 if 'alt' in node.attributes:
732 text.append(node.attributes['alt'])
727 text.append(node.attributes['alt'])
733 if 'uri' in node.attributes:
728 if 'uri' in node.attributes:
734 text.append(node.attributes['uri'])
729 text.append(node.attributes['uri'])
735 self.body.append('[image: %s]\n' % ('/'.join(text)))
730 self.body.append('[image: %s]\n' % ('/'.join(text)))
736 raise nodes.SkipNode
731 raise nodes.SkipNode
737
732
738 def visit_important(self, node):
733 def visit_important(self, node):
739 self.visit_admonition(node, 'important')
734 self.visit_admonition(node, 'important')
740
735
741 depart_important = depart_admonition
736 depart_important = depart_admonition
742
737
743 def visit_label(self, node):
738 def visit_label(self, node):
744 # footnote and citation
739 # footnote and citation
745 if (isinstance(node.parent, nodes.footnote)
740 if (isinstance(node.parent, nodes.footnote)
746 or isinstance(node.parent, nodes.citation)):
741 or isinstance(node.parent, nodes.citation)):
747 raise nodes.SkipNode
742 raise nodes.SkipNode
748 self.document.reporter.warning('"unsupported "label"',
743 self.document.reporter.warning('"unsupported "label"',
749 base_node=node)
744 base_node=node)
750 self.body.append('[')
745 self.body.append('[')
751
746
752 def depart_label(self, node):
747 def depart_label(self, node):
753 self.body.append(']\n')
748 self.body.append(']\n')
754
749
755 def visit_legend(self, node):
750 def visit_legend(self, node):
756 pass
751 pass
757
752
758 def depart_legend(self, node):
753 def depart_legend(self, node):
759 pass
754 pass
760
755
761 # WHAT should we use .INDENT, .UNINDENT ?
756 # WHAT should we use .INDENT, .UNINDENT ?
762 def visit_line_block(self, node):
757 def visit_line_block(self, node):
763 self._line_block += 1
758 self._line_block += 1
764 if self._line_block == 1:
759 if self._line_block == 1:
765 self.body.append('.sp\n')
760 self.body.append('.sp\n')
766 self.body.append('.nf\n')
761 self.body.append('.nf\n')
767 else:
762 else:
768 self.body.append('.in +2\n')
763 self.body.append('.in +2\n')
769
764
770 def depart_line_block(self, node):
765 def depart_line_block(self, node):
771 self._line_block -= 1
766 self._line_block -= 1
772 if self._line_block == 0:
767 if self._line_block == 0:
773 self.body.append('.fi\n')
768 self.body.append('.fi\n')
774 self.body.append('.sp\n')
769 self.body.append('.sp\n')
775 else:
770 else:
776 self.body.append('.in -2\n')
771 self.body.append('.in -2\n')
777
772
778 def visit_line(self, node):
773 def visit_line(self, node):
779 pass
774 pass
780
775
781 def depart_line(self, node):
776 def depart_line(self, node):
782 self.body.append('\n')
777 self.body.append('\n')
783
778
784 def visit_list_item(self, node):
779 def visit_list_item(self, node):
785 # man 7 man argues to use ".IP" instead of ".TP"
780 # man 7 man argues to use ".IP" instead of ".TP"
786 self.body.append('.IP %s %d\n' % (
781 self.body.append('.IP %s %d\n' % (
787 self._list_char[-1].next(),
782 self._list_char[-1].next(),
788 self._list_char[-1].get_width(),))
783 self._list_char[-1].get_width(),))
789
784
790 def depart_list_item(self, node):
785 def depart_list_item(self, node):
791 pass
786 pass
792
787
793 def visit_literal(self, node):
788 def visit_literal(self, node):
794 self.body.append(self.defs['literal'][0])
789 self.body.append(self.defs['literal'][0])
795
790
796 def depart_literal(self, node):
791 def depart_literal(self, node):
797 self.body.append(self.defs['literal'][1])
792 self.body.append(self.defs['literal'][1])
798
793
799 def visit_literal_block(self, node):
794 def visit_literal_block(self, node):
800 self.body.append(self.defs['literal_block'][0])
795 self.body.append(self.defs['literal_block'][0])
801 self._in_literal = True
796 self._in_literal = True
802
797
803 def depart_literal_block(self, node):
798 def depart_literal_block(self, node):
804 self._in_literal = False
799 self._in_literal = False
805 self.body.append(self.defs['literal_block'][1])
800 self.body.append(self.defs['literal_block'][1])
806
801
807 def visit_meta(self, node):
802 def visit_meta(self, node):
808 raise NotImplementedError, node.astext()
803 raise NotImplementedError, node.astext()
809
804
810 def depart_meta(self, node):
805 def depart_meta(self, node):
811 pass
806 pass
812
807
813 def visit_note(self, node):
808 def visit_note(self, node):
814 self.visit_admonition(node, 'note')
809 self.visit_admonition(node, 'note')
815
810
816 depart_note = depart_admonition
811 depart_note = depart_admonition
817
812
818 def indent(self, by=0.5):
813 def indent(self, by=0.5):
819 # if we are in a section ".SH" there already is a .RS
814 # if we are in a section ".SH" there already is a .RS
820 step = self._indent[-1]
815 step = self._indent[-1]
821 self._indent.append(by)
816 self._indent.append(by)
822 self.body.append(self.defs['indent'][0] % step)
817 self.body.append(self.defs['indent'][0] % step)
823
818
824 def dedent(self):
819 def dedent(self):
825 self._indent.pop()
820 self._indent.pop()
826 self.body.append(self.defs['indent'][1])
821 self.body.append(self.defs['indent'][1])
827
822
828 def visit_option_list(self, node):
823 def visit_option_list(self, node):
829 self.indent(OPTION_LIST_INDENT)
824 self.indent(OPTION_LIST_INDENT)
830
825
831 def depart_option_list(self, node):
826 def depart_option_list(self, node):
832 self.dedent()
827 self.dedent()
833
828
834 def visit_option_list_item(self, node):
829 def visit_option_list_item(self, node):
835 # one item of the list
830 # one item of the list
836 self.body.append(self.defs['option_list_item'][0])
831 self.body.append(self.defs['option_list_item'][0])
837
832
838 def depart_option_list_item(self, node):
833 def depart_option_list_item(self, node):
839 self.body.append(self.defs['option_list_item'][1])
834 self.body.append(self.defs['option_list_item'][1])
840
835
841 def visit_option_group(self, node):
836 def visit_option_group(self, node):
842 # one option can have several forms, so they are treated as a group
837 # one option can have several forms, so they are treated as a group
843 # options without parameter bold only, .B, -v
838 # options without parameter bold only, .B, -v
844 # options with parameter bold italic, .BI, -f file
839 # options with parameter bold italic, .BI, -f file
845 #
840 #
846 # we do not know if .B or .BI
841 # we do not know if .B or .BI
847 self.context.append('.B') # blind guess
842 self.context.append('.B') # blind guess
848 self.context.append(len(self.body)) # to be able to insert later
843 self.context.append(len(self.body)) # to be able to insert later
849 self.context.append(0) # option counter
844 self.context.append(0) # option counter
850
845
851 def depart_option_group(self, node):
846 def depart_option_group(self, node):
852 self.context.pop() # the counter
847 self.context.pop() # the counter
853 start_position = self.context.pop()
848 start_position = self.context.pop()
854 text = self.body[start_position:]
849 text = self.body[start_position:]
855 del self.body[start_position:]
850 del self.body[start_position:]
856 self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
851 self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
857
852
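# Illustrative sketch (not part of the writer class, simplified): the
# deferred-markup trick used by visit_option_group/depart_option_group above.
# Text for the option forms is emitted first; whether the group needs the
# bold macro (.B) or the bold/italic macro (.BI) is only known once an
# argument has been seen, so the macro is spliced in afterwards at the
# recorded position.
body = ['previous output\n']
context = ['.B', len(body), 0]      # blind guess, insert position, counter
body.append('-f')                   # an option form is written out
context[-3] = '.BI'                 # an argument was seen, upgrade the guess
body.append('\\ file')
context.pop()                       # drop the option counter
start = context.pop()               # recover the recorded insert position
text = body[start:]
del body[start:]
body.append('%s%s\n' % (context.pop(), ''.join(text)))
assert body == ['previous output\n', '.BI-f\\ file\n']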
858 def visit_option(self, node):
853 def visit_option(self, node):
859 # each form of the option will be presented separately
854 # each form of the option will be presented separately
860 if self.context[-1] > 0:
855 if self.context[-1] > 0:
861 self.body.append(', ')
856 self.body.append(', ')
862 if self.context[-3] == '.BI':
857 if self.context[-3] == '.BI':
863 self.body.append('\\')
858 self.body.append('\\')
864 self.body.append(' ')
859 self.body.append(' ')
865
860
866 def depart_option(self, node):
861 def depart_option(self, node):
867 self.context[-1] += 1
862 self.context[-1] += 1
868
863
869 def visit_option_string(self, node):
864 def visit_option_string(self, node):
870 # do not know if .B or .BI
865 # do not know if .B or .BI
871 pass
866 pass
872
867
873 def depart_option_string(self, node):
868 def depart_option_string(self, node):
874 pass
869 pass
875
870
876 def visit_option_argument(self, node):
871 def visit_option_argument(self, node):
877 self.context[-3] = '.BI' # bold/italic alternate
872 self.context[-3] = '.BI' # bold/italic alternate
878 if node['delimiter'] != ' ':
873 if node['delimiter'] != ' ':
879 self.body.append('\\fB%s ' % node['delimiter'])
874 self.body.append('\\fB%s ' % node['delimiter'])
880 elif self.body[len(self.body)-1].endswith('='):
875 elif self.body[len(self.body)-1].endswith('='):
881 # a blank only means no blank in output, just changing font
876 # a blank only means no blank in output, just changing font
882 self.body.append(' ')
877 self.body.append(' ')
883 else:
878 else:
884 # blank backslash blank, switch font then a blank
879 # blank backslash blank, switch font then a blank
885 self.body.append(' \\ ')
880 self.body.append(' \\ ')
886
881
887 def depart_option_argument(self, node):
882 def depart_option_argument(self, node):
888 pass
883 pass
889
884
890 def visit_organization(self, node):
885 def visit_organization(self, node):
891 self.visit_docinfo_item(node, 'organization')
886 self.visit_docinfo_item(node, 'organization')
892
887
893 def depart_organization(self, node):
888 def depart_organization(self, node):
894 pass
889 pass
895
890
896 def visit_paragraph(self, node):
891 def visit_paragraph(self, node):
897 # ``.PP`` : Start standard indented paragraph.
892 # ``.PP`` : Start standard indented paragraph.
898 # ``.LP`` : Start block paragraph, all except the first.
893 # ``.LP`` : Start block paragraph, all except the first.
899 # ``.P [type]`` : Start paragraph type.
894 # ``.P [type]`` : Start paragraph type.
900 # NOTE don't use paragraph starts because they reset indentation.
895 # NOTE don't use paragraph starts because they reset indentation.
901 # ``.sp`` is only vertical space
896 # ``.sp`` is only vertical space
902 self.ensure_eol()
897 self.ensure_eol()
903 self.body.append('.sp\n')
898 self.body.append('.sp\n')
904
899
905 def depart_paragraph(self, node):
900 def depart_paragraph(self, node):
906 self.body.append('\n')
901 self.body.append('\n')
907
902
908 def visit_problematic(self, node):
903 def visit_problematic(self, node):
909 self.body.append(self.defs['problematic'][0])
904 self.body.append(self.defs['problematic'][0])
910
905
911 def depart_problematic(self, node):
906 def depart_problematic(self, node):
912 self.body.append(self.defs['problematic'][1])
907 self.body.append(self.defs['problematic'][1])
913
908
914 def visit_raw(self, node):
909 def visit_raw(self, node):
915 if node.get('format') == 'manpage':
910 if node.get('format') == 'manpage':
916 self.body.append(node.astext() + "\n")
911 self.body.append(node.astext() + "\n")
917 # Keep non-manpage raw text out of output:
912 # Keep non-manpage raw text out of output:
918 raise nodes.SkipNode
913 raise nodes.SkipNode
919
914
920 def visit_reference(self, node):
915 def visit_reference(self, node):
921 """E.g. link or email address."""
916 """E.g. link or email address."""
922 self.body.append(self.defs['reference'][0])
917 self.body.append(self.defs['reference'][0])
923
918
924 def depart_reference(self, node):
919 def depart_reference(self, node):
925 self.body.append(self.defs['reference'][1])
920 self.body.append(self.defs['reference'][1])
926
921
927 def visit_revision(self, node):
922 def visit_revision(self, node):
928 self.visit_docinfo_item(node, 'revision')
923 self.visit_docinfo_item(node, 'revision')
929
924
930 depart_revision = depart_docinfo_item
925 depart_revision = depart_docinfo_item
931
926
932 def visit_row(self, node):
927 def visit_row(self, node):
933 self._active_table.new_row()
928 self._active_table.new_row()
934
929
935 def depart_row(self, node):
930 def depart_row(self, node):
936 pass
931 pass
937
932
938 def visit_section(self, node):
933 def visit_section(self, node):
939 self.section_level += 1
934 self.section_level += 1
940
935
941 def depart_section(self, node):
936 def depart_section(self, node):
942 self.section_level -= 1
937 self.section_level -= 1
943
938
944 def visit_status(self, node):
939 def visit_status(self, node):
945 self.visit_docinfo_item(node, 'status')
940 self.visit_docinfo_item(node, 'status')
946
941
947 depart_status = depart_docinfo_item
942 depart_status = depart_docinfo_item
948
943
949 def visit_strong(self, node):
944 def visit_strong(self, node):
950 self.body.append(self.defs['strong'][0])
945 self.body.append(self.defs['strong'][0])
951
946
952 def depart_strong(self, node):
947 def depart_strong(self, node):
953 self.body.append(self.defs['strong'][1])
948 self.body.append(self.defs['strong'][1])
954
949
955 def visit_substitution_definition(self, node):
950 def visit_substitution_definition(self, node):
956 """Internal only."""
951 """Internal only."""
957 raise nodes.SkipNode
952 raise nodes.SkipNode
958
953
959 def visit_substitution_reference(self, node):
954 def visit_substitution_reference(self, node):
960 self.document.reporter.warning('"substitution_reference" not supported',
955 self.document.reporter.warning('"substitution_reference" not supported',
961 base_node=node)
956 base_node=node)
962
957
963 def visit_subtitle(self, node):
958 def visit_subtitle(self, node):
964 if isinstance(node.parent, nodes.sidebar):
959 if isinstance(node.parent, nodes.sidebar):
965 self.body.append(self.defs['strong'][0])
960 self.body.append(self.defs['strong'][0])
966 elif isinstance(node.parent, nodes.document):
961 elif isinstance(node.parent, nodes.document):
967 self.visit_docinfo_item(node, 'subtitle')
962 self.visit_docinfo_item(node, 'subtitle')
968 elif isinstance(node.parent, nodes.section):
963 elif isinstance(node.parent, nodes.section):
969 self.body.append(self.defs['strong'][0])
964 self.body.append(self.defs['strong'][0])
970
965
971 def depart_subtitle(self, node):
966 def depart_subtitle(self, node):
972 # document subtitle calls SkipNode
967 # document subtitle calls SkipNode
973 self.body.append(self.defs['strong'][1]+'\n.PP\n')
968 self.body.append(self.defs['strong'][1]+'\n.PP\n')
974
969
975 def visit_system_message(self, node):
970 def visit_system_message(self, node):
976 # TODO add report_level
971 # TODO add report_level
977 #if node['level'] < self.document.reporter['writer'].report_level:
972 #if node['level'] < self.document.reporter['writer'].report_level:
978 # Level is too low to display:
973 # Level is too low to display:
979 # raise nodes.SkipNode
974 # raise nodes.SkipNode
980 attr = {}
975 attr = {}
981 backref_text = ''
976 backref_text = ''
982 if node.hasattr('id'):
977 if node.hasattr('id'):
983 attr['name'] = node['id']
978 attr['name'] = node['id']
984 if node.hasattr('line'):
979 if node.hasattr('line'):
985 line = ', line %s' % node['line']
980 line = ', line %s' % node['line']
986 else:
981 else:
987 line = ''
982 line = ''
988 self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
983 self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
989 % (node['type'], node['level'], node['source'], line))
984 % (node['type'], node['level'], node['source'], line))
990
985
991 def depart_system_message(self, node):
986 def depart_system_message(self, node):
992 pass
987 pass
993
988
994 def visit_table(self, node):
989 def visit_table(self, node):
995 self._active_table = Table()
990 self._active_table = Table()
996
991
997 def depart_table(self, node):
992 def depart_table(self, node):
998 self.ensure_eol()
993 self.ensure_eol()
999 self.body.extend(self._active_table.as_list())
994 self.body.extend(self._active_table.as_list())
1000 self._active_table = None
995 self._active_table = None
1001
996
1002 def visit_target(self, node):
997 def visit_target(self, node):
1003 # targets are in-document hyperlink targets, of no use in man pages.
998 # targets are in-document hyperlink targets, of no use in man pages.
1004 raise nodes.SkipNode
999 raise nodes.SkipNode
1005
1000
1006 def visit_tbody(self, node):
1001 def visit_tbody(self, node):
1007 pass
1002 pass
1008
1003
1009 def depart_tbody(self, node):
1004 def depart_tbody(self, node):
1010 pass
1005 pass
1011
1006
1012 def visit_term(self, node):
1007 def visit_term(self, node):
1013 self.body.append(self.defs['term'][0])
1008 self.body.append(self.defs['term'][0])
1014
1009
1015 def depart_term(self, node):
1010 def depart_term(self, node):
1016 self.body.append(self.defs['term'][1])
1011 self.body.append(self.defs['term'][1])
1017
1012
1018 def visit_tgroup(self, node):
1013 def visit_tgroup(self, node):
1019 pass
1014 pass
1020
1015
1021 def depart_tgroup(self, node):
1016 def depart_tgroup(self, node):
1022 pass
1017 pass
1023
1018
1024 def visit_thead(self, node):
1019 def visit_thead(self, node):
1025 # MAYBE double line '='
1020 # MAYBE double line '='
1026 pass
1021 pass
1027
1022
1028 def depart_thead(self, node):
1023 def depart_thead(self, node):
1029 # MAYBE double line '='
1024 # MAYBE double line '='
1030 pass
1025 pass
1031
1026
1032 def visit_tip(self, node):
1027 def visit_tip(self, node):
1033 self.visit_admonition(node, 'tip')
1028 self.visit_admonition(node, 'tip')
1034
1029
1035 depart_tip = depart_admonition
1030 depart_tip = depart_admonition
1036
1031
1037 def visit_title(self, node):
1032 def visit_title(self, node):
1038 if isinstance(node.parent, nodes.topic):
1033 if isinstance(node.parent, nodes.topic):
1039 self.body.append(self.defs['topic-title'][0])
1034 self.body.append(self.defs['topic-title'][0])
1040 elif isinstance(node.parent, nodes.sidebar):
1035 elif isinstance(node.parent, nodes.sidebar):
1041 self.body.append(self.defs['sidebar-title'][0])
1036 self.body.append(self.defs['sidebar-title'][0])
1042 elif isinstance(node.parent, nodes.admonition):
1037 elif isinstance(node.parent, nodes.admonition):
1043 self.body.append('.IP "')
1038 self.body.append('.IP "')
1044 elif self.section_level == 0:
1039 elif self.section_level == 0:
1045 self._docinfo['title'] = node.astext()
1040 self._docinfo['title'] = node.astext()
1046 # document title for .TH
1041 # document title for .TH
1047 self._docinfo['title_upper'] = node.astext().upper()
1042 self._docinfo['title_upper'] = node.astext().upper()
1048 raise nodes.SkipNode
1043 raise nodes.SkipNode
1049 elif self.section_level == 1:
1044 elif self.section_level == 1:
1050 self.body.append('.SH ')
1045 self.body.append('.SH ')
1051 for n in node.traverse(nodes.Text):
1046 for n in node.traverse(nodes.Text):
1052 n.parent.replace(n, nodes.Text(n.astext().upper()))
1047 n.parent.replace(n, nodes.Text(n.astext().upper()))
1053 else:
1048 else:
1054 self.body.append('.SS ')
1049 self.body.append('.SS ')
1055
1050
1056 def depart_title(self, node):
1051 def depart_title(self, node):
1057 if isinstance(node.parent, nodes.admonition):
1052 if isinstance(node.parent, nodes.admonition):
1058 self.body.append('"')
1053 self.body.append('"')
1059 self.body.append('\n')
1054 self.body.append('\n')
1060
1055
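# Not part of the writer: a condensed, purely illustrative restatement of the
# section-level logic in visit_title above (the topic/sidebar/admonition
# branches are handled separately and omitted here).
def title_macro(section_level):
    if section_level == 0:
        return None     # document title: stored in _docinfo, emitted via .TH
    elif section_level == 1:
        return '.SH '   # top-level section, heading text is upper-cased
    return '.SS '       # any deeper section

assert title_macro(1) == '.SH '
assert title_macro(3) == '.SS '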
1061 def visit_title_reference(self, node):
1056 def visit_title_reference(self, node):
1062 """inline citation reference"""
1057 """inline citation reference"""
1063 self.body.append(self.defs['title_reference'][0])
1058 self.body.append(self.defs['title_reference'][0])
1064
1059
1065 def depart_title_reference(self, node):
1060 def depart_title_reference(self, node):
1066 self.body.append(self.defs['title_reference'][1])
1061 self.body.append(self.defs['title_reference'][1])
1067
1062
1068 def visit_topic(self, node):
1063 def visit_topic(self, node):
1069 pass
1064 pass
1070
1065
1071 def depart_topic(self, node):
1066 def depart_topic(self, node):
1072 pass
1067 pass
1073
1068
1074 def visit_sidebar(self, node):
1069 def visit_sidebar(self, node):
1075 pass
1070 pass
1076
1071
1077 def depart_sidebar(self, node):
1072 def depart_sidebar(self, node):
1078 pass
1073 pass
1079
1074
1080 def visit_rubric(self, node):
1075 def visit_rubric(self, node):
1081 pass
1076 pass
1082
1077
1083 def depart_rubric(self, node):
1078 def depart_rubric(self, node):
1084 pass
1079 pass
1085
1080
1086 def visit_transition(self, node):
1081 def visit_transition(self, node):
1087 # .PP Begin a new paragraph and reset prevailing indent.
1082 # .PP Begin a new paragraph and reset prevailing indent.
1088 # .sp N leaves N lines of blank space.
1083 # .sp N leaves N lines of blank space.
1089 # .ce centers the next line
1084 # .ce centers the next line
1090 self.body.append('\n.sp\n.ce\n----\n')
1085 self.body.append('\n.sp\n.ce\n----\n')
1091
1086
1092 def depart_transition(self, node):
1087 def depart_transition(self, node):
1093 self.body.append('\n.ce 0\n.sp\n')
1088 self.body.append('\n.ce 0\n.sp\n')
1094
1089
1095 def visit_version(self, node):
1090 def visit_version(self, node):
1096 self.visit_docinfo_item(node, 'version')
1091 self.visit_docinfo_item(node, 'version')
1097
1092
1098 def visit_warning(self, node):
1093 def visit_warning(self, node):
1099 self.visit_admonition(node, 'warning')
1094 self.visit_admonition(node, 'warning')
1100
1095
1101 depart_warning = depart_admonition
1096 depart_warning = depart_admonition
1102
1097
1103 def unimplemented_visit(self, node):
1098 def unimplemented_visit(self, node):
1104 raise NotImplementedError('visiting unimplemented node type: %s'
1099 raise NotImplementedError('visiting unimplemented node type: %s'
1105 % node.__class__.__name__)
1100 % node.__class__.__name__)
1106
1101
1107 # The following part is taken from the Docutils rst2man.py script:
1102 # The following part is taken from the Docutils rst2man.py script:
1108 if __name__ == "__main__":
1103 if __name__ == "__main__":
1109 from docutils.core import publish_cmdline, default_description
1104 from docutils.core import publish_cmdline, default_description
1110 description = ("Generates plain unix manual documents. " +
1105 description = ("Generates plain unix manual documents. " +
1111 default_description)
1106 default_description)
1112 publish_cmdline(writer=Writer(), description=description)
1107 publish_cmdline(writer=Writer(), description=description)
1113
1108
1114 # vim: set fileencoding=utf-8 et ts=4 ai :
1109 # vim: set fileencoding=utf-8 et ts=4 ai :
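# A minimal, self-contained sketch (not part of hgmanpage) of the docutils
# visitor pattern the writer above is built on: parse reST into a doctree,
# then walk it with visit_*/depart_* callbacks that append roff-style markup.
# Only standard docutils APIs are used; the node handlers are illustrative.
from docutils import nodes
from docutils.core import publish_doctree

class TinyRoffVisitor(nodes.SparseNodeVisitor):
    def __init__(self, document):
        nodes.SparseNodeVisitor.__init__(self, document)
        self.body = []
    def visit_paragraph(self, node):
        self.body.append('.sp\n')    # vertical space, as in visit_paragraph above
    def depart_paragraph(self, node):
        self.body.append('\n')
    def visit_Text(self, node):
        self.body.append(node.astext())
    def depart_Text(self, node):
        pass

doctree = publish_doctree('Hello from a tiny reST document.')
visitor = TinyRoffVisitor(doctree)
doctree.walkabout(visitor)
print ''.join(visitor.body)          # ".sp\nHello from a tiny reST document.\n"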
@@ -1,188 +1,188 b''
1 # churn.py - create a graph of revisions count grouped by template
1 # churn.py - create a graph of revisions count grouped by template
2 #
2 #
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''command to display statistics about repository history'''
9 '''command to display statistics about repository history'''
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial import patch, cmdutil, util, templater
12 from mercurial import patch, cmdutil, util, templater
13 import sys, os
13 import os
14 import time, datetime
14 import time, datetime
15
15
16 def maketemplater(ui, repo, tmpl):
16 def maketemplater(ui, repo, tmpl):
17 tmpl = templater.parsestring(tmpl, quoted=False)
17 tmpl = templater.parsestring(tmpl, quoted=False)
18 try:
18 try:
19 t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
19 t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
20 except SyntaxError, inst:
20 except SyntaxError, inst:
21 raise util.Abort(inst.args[0])
21 raise util.Abort(inst.args[0])
22 t.use_template(tmpl)
22 t.use_template(tmpl)
23 return t
23 return t
24
24
25 def changedlines(ui, repo, ctx1, ctx2, fns):
25 def changedlines(ui, repo, ctx1, ctx2, fns):
26 added, removed = 0, 0
26 added, removed = 0, 0
27 fmatch = cmdutil.matchfiles(repo, fns)
27 fmatch = cmdutil.matchfiles(repo, fns)
28 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
28 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
29 for l in diff.split('\n'):
29 for l in diff.split('\n'):
30 if l.startswith("+") and not l.startswith("+++ "):
30 if l.startswith("+") and not l.startswith("+++ "):
31 added += 1
31 added += 1
32 elif l.startswith("-") and not l.startswith("--- "):
32 elif l.startswith("-") and not l.startswith("--- "):
33 removed += 1
33 removed += 1
34 return (added, removed)
34 return (added, removed)
35
35
36 def countrate(ui, repo, amap, *pats, **opts):
36 def countrate(ui, repo, amap, *pats, **opts):
37 """Calculate stats"""
37 """Calculate stats"""
38 if opts.get('dateformat'):
38 if opts.get('dateformat'):
39 def getkey(ctx):
39 def getkey(ctx):
40 t, tz = ctx.date()
40 t, tz = ctx.date()
41 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
41 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
42 return date.strftime(opts['dateformat'])
42 return date.strftime(opts['dateformat'])
43 else:
43 else:
44 tmpl = opts.get('template', '{author|email}')
44 tmpl = opts.get('template', '{author|email}')
45 tmpl = maketemplater(ui, repo, tmpl)
45 tmpl = maketemplater(ui, repo, tmpl)
46 def getkey(ctx):
46 def getkey(ctx):
47 ui.pushbuffer()
47 ui.pushbuffer()
48 tmpl.show(ctx)
48 tmpl.show(ctx)
49 return ui.popbuffer()
49 return ui.popbuffer()
50
50
51 state = {'count': 0}
51 state = {'count': 0}
52 rate = {}
52 rate = {}
53 df = False
53 df = False
54 if opts.get('date'):
54 if opts.get('date'):
55 df = util.matchdate(opts['date'])
55 df = util.matchdate(opts['date'])
56
56
57 m = cmdutil.match(repo, pats, opts)
57 m = cmdutil.match(repo, pats, opts)
58 def prep(ctx, fns):
58 def prep(ctx, fns):
59 rev = ctx.rev()
59 rev = ctx.rev()
60 if df and not df(ctx.date()[0]): # doesn't match date format
60 if df and not df(ctx.date()[0]): # doesn't match date format
61 return
61 return
62
62
63 key = getkey(ctx)
63 key = getkey(ctx)
64 key = amap.get(key, key) # alias remap
64 key = amap.get(key, key) # alias remap
65 if opts.get('changesets'):
65 if opts.get('changesets'):
66 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
66 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
67 else:
67 else:
68 parents = ctx.parents()
68 parents = ctx.parents()
69 if len(parents) > 1:
69 if len(parents) > 1:
70 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
70 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
71 return
71 return
72
72
73 ctx1 = parents[0]
73 ctx1 = parents[0]
74 lines = changedlines(ui, repo, ctx1, ctx, fns)
74 lines = changedlines(ui, repo, ctx1, ctx, fns)
75 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
75 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
76
76
77 state['count'] += 1
77 state['count'] += 1
78 ui.progress(_('analyzing'), state['count'], total=len(repo))
78 ui.progress(_('analyzing'), state['count'], total=len(repo))
79
79
80 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
80 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
81 continue
81 continue
82
82
83 ui.progress(_('analyzing'), None)
83 ui.progress(_('analyzing'), None)
84
84
85 return rate
85 return rate
86
86
87
87
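# Illustration only (made-up values): how the --dateformat grouping key in
# countrate() above is computed.  ctx.date() yields (unixtime, tzoffset);
# subtracting the offset gives committer-local time, which is then formatted
# with the user's strftime pattern.
import time, datetime
t, tz = 1271154000.0, -7200                      # hypothetical changeset date, UTC+2
date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
print date.strftime('%Y-%m')                     # -> 2010-04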
88 def churn(ui, repo, *pats, **opts):
88 def churn(ui, repo, *pats, **opts):
89 '''histogram of changes to the repository
89 '''histogram of changes to the repository
90
90
91 This command will display a histogram representing the number
91 This command will display a histogram representing the number
92 of changed lines or revisions, grouped according to the given
92 of changed lines or revisions, grouped according to the given
93 template. The default template will group changes by author.
93 template. The default template will group changes by author.
94 The --dateformat option may be used to group the results by
94 The --dateformat option may be used to group the results by
95 date instead.
95 date instead.
96
96
97 Statistics are based on the number of changed lines, or
97 Statistics are based on the number of changed lines, or
98 alternatively the number of matching revisions if the
98 alternatively the number of matching revisions if the
99 --changesets option is specified.
99 --changesets option is specified.
100
100
101 Examples::
101 Examples::
102
102
103 # display count of changed lines for every committer
103 # display count of changed lines for every committer
104 hg churn -t '{author|email}'
104 hg churn -t '{author|email}'
105
105
106 # display daily activity graph
106 # display daily activity graph
107 hg churn -f '%H' -s -c
107 hg churn -f '%H' -s -c
108
108
109 # display activity of developers by month
109 # display activity of developers by month
110 hg churn -f '%Y-%m' -s -c
110 hg churn -f '%Y-%m' -s -c
111
111
112 # display count of lines changed in every year
112 # display count of lines changed in every year
113 hg churn -f '%Y' -s
113 hg churn -f '%Y' -s
114
114
115 It is possible to map alternate email addresses to a main address
115 It is possible to map alternate email addresses to a main address
116 by providing a file using the following format::
116 by providing a file using the following format::
117
117
118 <alias email> <actual email>
118 <alias email> <actual email>
119
119
120 Such a file may be specified with the --aliases option, otherwise
120 Such a file may be specified with the --aliases option, otherwise
121 a .hgchurn file will be looked for in the working directory root.
121 a .hgchurn file will be looked for in the working directory root.
122 '''
122 '''
123 def pad(s, l):
123 def pad(s, l):
124 return (s + " " * l)[:l]
124 return (s + " " * l)[:l]
125
125
126 amap = {}
126 amap = {}
127 aliases = opts.get('aliases')
127 aliases = opts.get('aliases')
128 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
128 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
129 aliases = repo.wjoin('.hgchurn')
129 aliases = repo.wjoin('.hgchurn')
130 if aliases:
130 if aliases:
131 for l in open(aliases, "r"):
131 for l in open(aliases, "r"):
132 l = l.strip()
132 l = l.strip()
133 alias, actual = l.split()
133 alias, actual = l.split()
134 amap[alias] = actual
134 amap[alias] = actual
135
135
136 rate = countrate(ui, repo, amap, *pats, **opts).items()
136 rate = countrate(ui, repo, amap, *pats, **opts).items()
137 if not rate:
137 if not rate:
138 return
138 return
139
139
140 sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
140 sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
141 rate.sort(key=sortkey)
141 rate.sort(key=sortkey)
142
142
143 # Be careful not to have a zero maxcount (issue833)
143 # Be careful not to have a zero maxcount (issue833)
144 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
144 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
145 maxname = max(len(k) for k, v in rate)
145 maxname = max(len(k) for k, v in rate)
146
146
147 ttywidth = util.termwidth()
147 ttywidth = util.termwidth()
148 ui.debug("assuming %i character terminal\n" % ttywidth)
148 ui.debug("assuming %i character terminal\n" % ttywidth)
149 width = ttywidth - maxname - 2 - 2 - 2
149 width = ttywidth - maxname - 2 - 2 - 2
150
150
151 if opts.get('diffstat'):
151 if opts.get('diffstat'):
152 width -= 15
152 width -= 15
153 def format(name, (added, removed)):
153 def format(name, (added, removed)):
154 return "%s %15s %s%s\n" % (pad(name, maxname),
154 return "%s %15s %s%s\n" % (pad(name, maxname),
155 '+%d/-%d' % (added, removed),
155 '+%d/-%d' % (added, removed),
156 ui.label('+' * charnum(added),
156 ui.label('+' * charnum(added),
157 'diffstat.inserted'),
157 'diffstat.inserted'),
158 ui.label('-' * charnum(removed),
158 ui.label('-' * charnum(removed),
159 'diffstat.deleted'))
159 'diffstat.deleted'))
160 else:
160 else:
161 width -= 6
161 width -= 6
162 def format(name, count):
162 def format(name, count):
163 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
163 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
164 '*' * charnum(sum(count)))
164 '*' * charnum(sum(count)))
165
165
166 def charnum(count):
166 def charnum(count):
167 return int(round(count * width / maxcount))
167 return int(round(count * width / maxcount))
168
168
169 for name, count in rate:
169 for name, count in rate:
170 ui.write(format(name, count))
170 ui.write(format(name, count))
171
171
172
172
173 cmdtable = {
173 cmdtable = {
174 "churn":
174 "churn":
175 (churn,
175 (churn,
176 [('r', 'rev', [], _('count rate for the specified revision or range')),
176 [('r', 'rev', [], _('count rate for the specified revision or range')),
177 ('d', 'date', '', _('count rate for revisions matching date spec')),
177 ('d', 'date', '', _('count rate for revisions matching date spec')),
178 ('t', 'template', '{author|email}',
178 ('t', 'template', '{author|email}',
179 _('template to group changesets')),
179 _('template to group changesets')),
180 ('f', 'dateformat', '',
180 ('f', 'dateformat', '',
181 _('strftime-compatible format for grouping by date')),
181 _('strftime-compatible format for grouping by date')),
182 ('c', 'changesets', False, _('count rate by number of changesets')),
182 ('c', 'changesets', False, _('count rate by number of changesets')),
183 ('s', 'sort', False, _('sort by key (default: sort by count)')),
183 ('s', 'sort', False, _('sort by key (default: sort by count)')),
184 ('', 'diffstat', False, _('display added/removed lines separately')),
184 ('', 'diffstat', False, _('display added/removed lines separately')),
185 ('', 'aliases', '', _('file with email aliases')),
185 ('', 'aliases', '', _('file with email aliases')),
186 ],
186 ],
187 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]")),
187 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]")),
188 }
188 }
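# A standalone sketch (not part of churn.py) of the alias-file format
# described in the help text above: one "<alias email> <actual email>" pair
# per line.  Unlike the loop in churn(), this version also tolerates blank
# lines and '#' comments.
def read_aliases(path):
    amap = {}
    for line in open(path, "r"):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        alias, actual = line.split()
        amap[alias] = actual
    return amap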
@@ -1,471 +1,466 b''
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
14 # 59 Temple Place, Suite 330,
15 # Boston, MA 02111-1307 USA
15 # Boston, MA 02111-1307 USA
16
16
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19
19
20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21
21
22 import os
22 import os
23 import stat
23 import stat
24 import urllib
24 import urllib
25 import urllib2
25 import urllib2
26 import email.Utils
26 import email.Utils
27
27
28 try:
29 from cStringIO import StringIO
30 except ImportError, msg:
31 from StringIO import StringIO
32
33 class RangeError(IOError):
28 class RangeError(IOError):
34 """Error raised when an unsatisfiable range is requested."""
29 """Error raised when an unsatisfiable range is requested."""
35 pass
30 pass
36
31
37 class HTTPRangeHandler(urllib2.BaseHandler):
32 class HTTPRangeHandler(urllib2.BaseHandler):
38 """Handler that enables HTTP Range headers.
33 """Handler that enables HTTP Range headers.
39
34
40 This was extremely simple. The Range header is an HTTP feature to
35 This was extremely simple. The Range header is an HTTP feature to
41 begin with, so all this class does is tell urllib2 that the
36 begin with, so all this class does is tell urllib2 that the
42 "206 Partial Content" response from the HTTP server is what we
37 "206 Partial Content" response from the HTTP server is what we
43 expected.
38 expected.
44
39
45 Example:
40 Example:
46 import urllib2
41 import urllib2
47 import byterange
42 import byterange
48
43
49 range_handler = byterange.HTTPRangeHandler()
44 range_handler = byterange.HTTPRangeHandler()
50 opener = urllib2.build_opener(range_handler)
45 opener = urllib2.build_opener(range_handler)
51
46
52 # install it
47 # install it
53 urllib2.install_opener(opener)
48 urllib2.install_opener(opener)
54
49
55 # create Request and set Range header
50 # create Request and set Range header
56 req = urllib2.Request('http://www.python.org/')
51 req = urllib2.Request('http://www.python.org/')
57 req.add_header('Range', 'bytes=30-50')
52 req.add_header('Range', 'bytes=30-50')
58 f = urllib2.urlopen(req)
53 f = urllib2.urlopen(req)
59 """
54 """
60
55
61 def http_error_206(self, req, fp, code, msg, hdrs):
56 def http_error_206(self, req, fp, code, msg, hdrs):
62 # 206 Partial Content Response
57 # 206 Partial Content Response
63 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
58 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 r.code = code
59 r.code = code
65 r.msg = msg
60 r.msg = msg
66 return r
61 return r
67
62
68 def http_error_416(self, req, fp, code, msg, hdrs):
63 def http_error_416(self, req, fp, code, msg, hdrs):
69 # HTTP's Range Not Satisfiable error
64 # HTTP's Range Not Satisfiable error
70 raise RangeError('Requested Range Not Satisfiable')
65 raise RangeError('Requested Range Not Satisfiable')
71
66
72 class RangeableFileObject:
67 class RangeableFileObject:
73 """File object wrapper to enable raw range handling.
68 """File object wrapper to enable raw range handling.
74 This was implemented primarily for handling range
69 This was implemented primarily for handling range
75 specifications for file:// urls. This object effectively makes
70 specifications for file:// urls. This object effectively makes
76 a file object look like it consists only of a range of bytes in
71 a file object look like it consists only of a range of bytes in
77 the stream.
72 the stream.
78
73
79 Examples:
74 Examples:
80 # expose 10 bytes, starting at byte position 20, from
75 # expose 10 bytes, starting at byte position 20, from
81 # /etc/passwd.
76 # /etc/passwd.
82 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
77 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 # seek seeks within the range (to position 23 in this case)
78 # seek seeks within the range (to position 23 in this case)
84 >>> fo.seek(3)
79 >>> fo.seek(3)
85 # tell tells where you're at _within the range_ (position 3 in
80 # tell tells where you're at _within the range_ (position 3 in
86 # this case)
81 # this case)
87 >>> fo.tell()
82 >>> fo.tell()
88 # read returns EOF if an attempt is made to read past the last
83 # read returns EOF if an attempt is made to read past the last
89 # byte in the range. The following will return only 7 bytes.
84 # byte in the range. The following will return only 7 bytes.
90 >>> fo.read(30)
85 >>> fo.read(30)
91 """
86 """
92
87
93 def __init__(self, fo, rangetup):
88 def __init__(self, fo, rangetup):
94 """Create a RangeableFileObject.
89 """Create a RangeableFileObject.
95 fo -- a file-like object. Only the read() method need be
90 fo -- a file-like object. Only the read() method need be
96 supported but supporting an optimized seek() is
91 supported but supporting an optimized seek() is
97 preferable.
92 preferable.
98 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
93 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 to work over.
94 to work over.
100 The file object provided is assumed to be at byte offset 0.
95 The file object provided is assumed to be at byte offset 0.
101 """
96 """
102 self.fo = fo
97 self.fo = fo
103 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
98 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 self.realpos = 0
99 self.realpos = 0
105 self._do_seek(self.firstbyte)
100 self._do_seek(self.firstbyte)
106
101
107 def __getattr__(self, name):
102 def __getattr__(self, name):
108 """This effectively allows us to wrap at the instance level.
103 """This effectively allows us to wrap at the instance level.
109 Any attribute not found in _this_ object will be searched for
104 Any attribute not found in _this_ object will be searched for
110 in self.fo. This includes methods."""
105 in self.fo. This includes methods."""
111 if hasattr(self.fo, name):
106 if hasattr(self.fo, name):
112 return getattr(self.fo, name)
107 return getattr(self.fo, name)
113 raise AttributeError(name)
108 raise AttributeError(name)
114
109
115 def tell(self):
110 def tell(self):
116 """Return the position within the range.
111 """Return the position within the range.
117 This is different from fo.tell in that position 0 is the
112 This is different from fo.tell in that position 0 is the
118 first byte position of the range tuple. For example, if
113 first byte position of the range tuple. For example, if
119 this object was created with a range tuple of (500,899),
114 this object was created with a range tuple of (500,899),
120 tell() will return 0 when at byte position 500 of the file.
115 tell() will return 0 when at byte position 500 of the file.
121 """
116 """
122 return (self.realpos - self.firstbyte)
117 return (self.realpos - self.firstbyte)
123
118
124 def seek(self, offset, whence=0):
119 def seek(self, offset, whence=0):
125 """Seek within the byte range.
120 """Seek within the byte range.
126 Positioning is identical to that described under tell().
121 Positioning is identical to that described under tell().
127 """
122 """
128 assert whence in (0, 1, 2)
123 assert whence in (0, 1, 2)
129 if whence == 0: # absolute seek
124 if whence == 0: # absolute seek
130 realoffset = self.firstbyte + offset
125 realoffset = self.firstbyte + offset
131 elif whence == 1: # relative seek
126 elif whence == 1: # relative seek
132 realoffset = self.realpos + offset
127 realoffset = self.realpos + offset
133 elif whence == 2: # absolute from end of file
128 elif whence == 2: # absolute from end of file
134 # XXX: are we raising the right Error here?
129 # XXX: are we raising the right Error here?
135 raise IOError('seek from end of file not supported.')
130 raise IOError('seek from end of file not supported.')
136
131
137 # do not allow seek past lastbyte in range
132 # do not allow seek past lastbyte in range
138 if self.lastbyte and (realoffset >= self.lastbyte):
133 if self.lastbyte and (realoffset >= self.lastbyte):
139 realoffset = self.lastbyte
134 realoffset = self.lastbyte
140
135
141 self._do_seek(realoffset - self.realpos)
136 self._do_seek(realoffset - self.realpos)
142
137
143 def read(self, size=-1):
138 def read(self, size=-1):
144 """Read within the range.
139 """Read within the range.
145 This method will limit the size read based on the range.
140 This method will limit the size read based on the range.
146 """
141 """
147 size = self._calc_read_size(size)
142 size = self._calc_read_size(size)
148 rslt = self.fo.read(size)
143 rslt = self.fo.read(size)
149 self.realpos += len(rslt)
144 self.realpos += len(rslt)
150 return rslt
145 return rslt
151
146
152 def readline(self, size=-1):
147 def readline(self, size=-1):
153 """Read lines within the range.
148 """Read lines within the range.
154 This method will limit the size read based on the range.
149 This method will limit the size read based on the range.
155 """
150 """
156 size = self._calc_read_size(size)
151 size = self._calc_read_size(size)
157 rslt = self.fo.readline(size)
152 rslt = self.fo.readline(size)
158 self.realpos += len(rslt)
153 self.realpos += len(rslt)
159 return rslt
154 return rslt
160
155
161 def _calc_read_size(self, size):
156 def _calc_read_size(self, size):
162 """Handles calculating the amount of data to read based on
157 """Handles calculating the amount of data to read based on
163 the range.
158 the range.
164 """
159 """
165 if self.lastbyte:
160 if self.lastbyte:
166 if size > -1:
161 if size > -1:
167 if ((self.realpos + size) >= self.lastbyte):
162 if ((self.realpos + size) >= self.lastbyte):
168 size = (self.lastbyte - self.realpos)
163 size = (self.lastbyte - self.realpos)
169 else:
164 else:
170 size = (self.lastbyte - self.realpos)
165 size = (self.lastbyte - self.realpos)
171 return size
166 return size
172
167
173 def _do_seek(self, offset):
168 def _do_seek(self, offset):
174 """Seek based on whether wrapped object supports seek().
169 """Seek based on whether wrapped object supports seek().
175 offset is relative to the current position (self.realpos).
170 offset is relative to the current position (self.realpos).
176 """
171 """
177 assert offset >= 0
172 assert offset >= 0
178 if not hasattr(self.fo, 'seek'):
173 if not hasattr(self.fo, 'seek'):
179 self._poor_mans_seek(offset)
174 self._poor_mans_seek(offset)
180 else:
175 else:
181 self.fo.seek(self.realpos + offset)
176 self.fo.seek(self.realpos + offset)
182 self.realpos += offset
177 self.realpos += offset
183
178
184 def _poor_mans_seek(self, offset):
179 def _poor_mans_seek(self, offset):
185 """Seek by calling the wrapped file objects read() method.
180 """Seek by calling the wrapped file objects read() method.
186 This is used for file like objects that do not have native
181 This is used for file like objects that do not have native
187 seek support. The wrapped objects read() method is called
182 seek support. The wrapped objects read() method is called
188 to manually seek to the desired position.
183 to manually seek to the desired position.
189 offset -- read this number of bytes from the wrapped
184 offset -- read this number of bytes from the wrapped
190 file object.
185 file object.
191 raise RangeError if we encounter EOF before reaching the
186 raise RangeError if we encounter EOF before reaching the
192 specified offset.
187 specified offset.
193 """
188 """
194 pos = 0
189 pos = 0
195 bufsize = 1024
190 bufsize = 1024
196 while pos < offset:
191 while pos < offset:
197 if (pos + bufsize) > offset:
192 if (pos + bufsize) > offset:
198 bufsize = offset - pos
193 bufsize = offset - pos
199 buf = self.fo.read(bufsize)
194 buf = self.fo.read(bufsize)
200 if len(buf) != bufsize:
195 if len(buf) != bufsize:
201 raise RangeError('Requested Range Not Satisfiable')
196 raise RangeError('Requested Range Not Satisfiable')
202 pos += bufsize
197 pos += bufsize
203
198
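# Standalone illustration (not part of the library) of the RangeableFileObject
# contract documented above: offsets are relative to the start of the range,
# reads never pass the (exclusive) last byte, and seeks may only move forward.
from cStringIO import StringIO     # used here for the example only
fo = RangeableFileObject(StringIO('0123456789abcdefghij'), (5, 15))
assert fo.read(4) == '5678'        # reading starts at byte 5 of the stream
assert fo.tell() == 4              # tell() counts from the range start
fo.seek(6)                         # still relative to the range start
assert fo.read(100) == 'bcde'      # stops at byte 15 of the underlying stream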
204 class FileRangeHandler(urllib2.FileHandler):
199 class FileRangeHandler(urllib2.FileHandler):
205 """FileHandler subclass that adds Range support.
200 """FileHandler subclass that adds Range support.
206 This class handles Range headers exactly like an HTTP
201 This class handles Range headers exactly like an HTTP
207 server would.
202 server would.
208 """
203 """
209 def open_local_file(self, req):
204 def open_local_file(self, req):
210 import mimetypes
205 import mimetypes
211 import email
206 import email
212 host = req.get_host()
207 host = req.get_host()
213 file = req.get_selector()
208 file = req.get_selector()
214 localfile = urllib.url2pathname(file)
209 localfile = urllib.url2pathname(file)
215 stats = os.stat(localfile)
210 stats = os.stat(localfile)
216 size = stats[stat.ST_SIZE]
211 size = stats[stat.ST_SIZE]
217 modified = email.Utils.formatdate(stats[stat.ST_MTIME])
212 modified = email.Utils.formatdate(stats[stat.ST_MTIME])
218 mtype = mimetypes.guess_type(file)[0]
213 mtype = mimetypes.guess_type(file)[0]
219 if host:
214 if host:
220 host, port = urllib.splitport(host)
215 host, port = urllib.splitport(host)
221 if port or socket.gethostbyname(host) not in self.get_names():
216 if port or socket.gethostbyname(host) not in self.get_names():
222 raise urllib2.URLError('file not on local host')
217 raise urllib2.URLError('file not on local host')
223 fo = open(localfile,'rb')
218 fo = open(localfile,'rb')
224 brange = req.headers.get('Range', None)
219 brange = req.headers.get('Range', None)
225 brange = range_header_to_tuple(brange)
220 brange = range_header_to_tuple(brange)
226 assert brange != ()
221 assert brange != ()
227 if brange:
222 if brange:
228 (fb, lb) = brange
223 (fb, lb) = brange
229 if lb == '':
224 if lb == '':
230 lb = size
225 lb = size
231 if fb < 0 or fb > size or lb > size:
226 if fb < 0 or fb > size or lb > size:
232 raise RangeError('Requested Range Not Satisfiable')
227 raise RangeError('Requested Range Not Satisfiable')
233 size = (lb - fb)
228 size = (lb - fb)
234 fo = RangeableFileObject(fo, (fb, lb))
229 fo = RangeableFileObject(fo, (fb, lb))
235 headers = email.message_from_string(
230 headers = email.message_from_string(
236 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
231 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
237 (mtype or 'text/plain', size, modified))
232 (mtype or 'text/plain', size, modified))
238 return urllib.addinfourl(fo, headers, 'file:'+file)
233 return urllib.addinfourl(fo, headers, 'file:'+file)
239
234
240
235
241 # FTP Range Support
236 # FTP Range Support
242 # Unfortunately, a large amount of base FTP code had to be copied
237 # Unfortunately, a large amount of base FTP code had to be copied
243 # from urllib and urllib2 in order to insert the FTP REST command.
238 # from urllib and urllib2 in order to insert the FTP REST command.
244 # Code modifications for range support have been commented as
239 # Code modifications for range support have been commented as
245 # follows:
240 # follows:
246 # -- range support modifications start/end here
241 # -- range support modifications start/end here
247
242
248 from urllib import splitport, splituser, splitpasswd, splitattr, \
243 from urllib import splitport, splituser, splitpasswd, splitattr, \
249 unquote, addclosehook, addinfourl
244 unquote, addclosehook, addinfourl
250 import ftplib
245 import ftplib
251 import socket
246 import socket
252 import sys
247 import sys
253 import mimetypes
248 import mimetypes
254 import email
249 import email
255
250
256 class FTPRangeHandler(urllib2.FTPHandler):
251 class FTPRangeHandler(urllib2.FTPHandler):
257 def ftp_open(self, req):
252 def ftp_open(self, req):
258 host = req.get_host()
253 host = req.get_host()
259 if not host:
254 if not host:
260 raise IOError('ftp error', 'no host given')
255 raise IOError('ftp error', 'no host given')
261 host, port = splitport(host)
256 host, port = splitport(host)
262 if port is None:
257 if port is None:
263 port = ftplib.FTP_PORT
258 port = ftplib.FTP_PORT
264 else:
259 else:
265 port = int(port)
260 port = int(port)
266
261
267 # username/password handling
262 # username/password handling
268 user, host = splituser(host)
263 user, host = splituser(host)
269 if user:
264 if user:
270 user, passwd = splitpasswd(user)
265 user, passwd = splitpasswd(user)
271 else:
266 else:
272 passwd = None
267 passwd = None
273 host = unquote(host)
268 host = unquote(host)
274 user = unquote(user or '')
269 user = unquote(user or '')
275 passwd = unquote(passwd or '')
270 passwd = unquote(passwd or '')
276
271
277 try:
272 try:
278 host = socket.gethostbyname(host)
273 host = socket.gethostbyname(host)
279 except socket.error, msg:
274 except socket.error, msg:
280 raise urllib2.URLError(msg)
275 raise urllib2.URLError(msg)
281 path, attrs = splitattr(req.get_selector())
276 path, attrs = splitattr(req.get_selector())
282 dirs = path.split('/')
277 dirs = path.split('/')
283 dirs = map(unquote, dirs)
278 dirs = map(unquote, dirs)
284 dirs, file = dirs[:-1], dirs[-1]
279 dirs, file = dirs[:-1], dirs[-1]
285 if dirs and not dirs[0]:
280 if dirs and not dirs[0]:
286 dirs = dirs[1:]
281 dirs = dirs[1:]
287 try:
282 try:
288 fw = self.connect_ftp(user, passwd, host, port, dirs)
283 fw = self.connect_ftp(user, passwd, host, port, dirs)
289 type = file and 'I' or 'D'
284 type = file and 'I' or 'D'
290 for attr in attrs:
285 for attr in attrs:
291 attr, value = splitattr(attr)
286 attr, value = splitattr(attr)
292 if attr.lower() == 'type' and \
287 if attr.lower() == 'type' and \
293 value in ('a', 'A', 'i', 'I', 'd', 'D'):
288 value in ('a', 'A', 'i', 'I', 'd', 'D'):
294 type = value.upper()
289 type = value.upper()
295
290
296 # -- range support modifications start here
291 # -- range support modifications start here
297 rest = None
292 rest = None
298 range_tup = range_header_to_tuple(req.headers.get('Range', None))
293 range_tup = range_header_to_tuple(req.headers.get('Range', None))
299 assert range_tup != ()
294 assert range_tup != ()
300 if range_tup:
295 if range_tup:
301 (fb, lb) = range_tup
296 (fb, lb) = range_tup
302 if fb > 0:
297 if fb > 0:
303 rest = fb
298 rest = fb
304 # -- range support modifications end here
299 # -- range support modifications end here
305
300
306 fp, retrlen = fw.retrfile(file, type, rest)
301 fp, retrlen = fw.retrfile(file, type, rest)
307
302
308 # -- range support modifications start here
303 # -- range support modifications start here
309 if range_tup:
304 if range_tup:
310 (fb, lb) = range_tup
305 (fb, lb) = range_tup
311 if lb == '':
306 if lb == '':
312 if retrlen is None or retrlen == 0:
307 if retrlen is None or retrlen == 0:
313 raise RangeError('Requested Range Not Satisfiable due'
308 raise RangeError('Requested Range Not Satisfiable due'
314 ' to unobtainable file length.')
309 ' to unobtainable file length.')
315 lb = retrlen
310 lb = retrlen
316 retrlen = lb - fb
311 retrlen = lb - fb
317 if retrlen < 0:
312 if retrlen < 0:
318 # beginning of range is larger than file
313 # beginning of range is larger than file
319 raise RangeError('Requested Range Not Satisfiable')
314 raise RangeError('Requested Range Not Satisfiable')
320 else:
315 else:
321 retrlen = lb - fb
316 retrlen = lb - fb
322 fp = RangeableFileObject(fp, (0, retrlen))
317 fp = RangeableFileObject(fp, (0, retrlen))
323 # -- range support modifications end here
318 # -- range support modifications end here
324
319
325 headers = ""
320 headers = ""
326 mtype = mimetypes.guess_type(req.get_full_url())[0]
321 mtype = mimetypes.guess_type(req.get_full_url())[0]
327 if mtype:
322 if mtype:
328 headers += "Content-Type: %s\n" % mtype
323 headers += "Content-Type: %s\n" % mtype
329 if retrlen is not None and retrlen >= 0:
324 if retrlen is not None and retrlen >= 0:
330 headers += "Content-Length: %d\n" % retrlen
325 headers += "Content-Length: %d\n" % retrlen
331 headers = email.message_from_string(headers)
326 headers = email.message_from_string(headers)
332 return addinfourl(fp, headers, req.get_full_url())
327 return addinfourl(fp, headers, req.get_full_url())
333 except ftplib.all_errors, msg:
328 except ftplib.all_errors, msg:
334 raise IOError('ftp error', msg), sys.exc_info()[2]
329 raise IOError('ftp error', msg), sys.exc_info()[2]
335
330
336 def connect_ftp(self, user, passwd, host, port, dirs):
331 def connect_ftp(self, user, passwd, host, port, dirs):
337 fw = ftpwrapper(user, passwd, host, port, dirs)
332 fw = ftpwrapper(user, passwd, host, port, dirs)
338 return fw
333 return fw
339
334
340 class ftpwrapper(urllib.ftpwrapper):
335 class ftpwrapper(urllib.ftpwrapper):
341 # range support note:
336 # range support note:
342 # this ftpwrapper code is copied directly from
337 # this ftpwrapper code is copied directly from
343 # urllib. The only enhancement is to add the rest
338 # urllib. The only enhancement is to add the rest
344 # argument and pass it on to ftp.ntransfercmd
339 # argument and pass it on to ftp.ntransfercmd
345 def retrfile(self, file, type, rest=None):
340 def retrfile(self, file, type, rest=None):
346 self.endtransfer()
341 self.endtransfer()
347 if type in ('d', 'D'):
342 if type in ('d', 'D'):
348 cmd = 'TYPE A'
343 cmd = 'TYPE A'
349 isdir = 1
344 isdir = 1
350 else:
345 else:
351 cmd = 'TYPE ' + type
346 cmd = 'TYPE ' + type
352 isdir = 0
347 isdir = 0
353 try:
348 try:
354 self.ftp.voidcmd(cmd)
349 self.ftp.voidcmd(cmd)
355 except ftplib.all_errors:
350 except ftplib.all_errors:
356 self.init()
351 self.init()
357 self.ftp.voidcmd(cmd)
352 self.ftp.voidcmd(cmd)
358 conn = None
353 conn = None
359 if file and not isdir:
354 if file and not isdir:
360 # Use nlst to see if the file exists at all
355 # Use nlst to see if the file exists at all
361 try:
356 try:
362 self.ftp.nlst(file)
357 self.ftp.nlst(file)
363 except ftplib.error_perm, reason:
358 except ftplib.error_perm, reason:
364 raise IOError('ftp error', reason), sys.exc_info()[2]
359 raise IOError('ftp error', reason), sys.exc_info()[2]
365 # Restore the transfer mode!
360 # Restore the transfer mode!
366 self.ftp.voidcmd(cmd)
361 self.ftp.voidcmd(cmd)
367 # Try to retrieve as a file
362 # Try to retrieve as a file
368 try:
363 try:
369 cmd = 'RETR ' + file
364 cmd = 'RETR ' + file
370 conn = self.ftp.ntransfercmd(cmd, rest)
365 conn = self.ftp.ntransfercmd(cmd, rest)
371 except ftplib.error_perm, reason:
366 except ftplib.error_perm, reason:
372 if str(reason).startswith('501'):
367 if str(reason).startswith('501'):
373 # workaround for REST not supported error
368 # workaround for REST not supported error
374 fp, retrlen = self.retrfile(file, type)
369 fp, retrlen = self.retrfile(file, type)
375 fp = RangeableFileObject(fp, (rest,''))
370 fp = RangeableFileObject(fp, (rest,''))
376 return (fp, retrlen)
371 return (fp, retrlen)
377 elif not str(reason).startswith('550'):
372 elif not str(reason).startswith('550'):
378 raise IOError('ftp error', reason), sys.exc_info()[2]
373 raise IOError('ftp error', reason), sys.exc_info()[2]
379 if not conn:
374 if not conn:
380 # Set transfer mode to ASCII!
375 # Set transfer mode to ASCII!
381 self.ftp.voidcmd('TYPE A')
376 self.ftp.voidcmd('TYPE A')
382 # Try a directory listing
377 # Try a directory listing
383 if file:
378 if file:
384 cmd = 'LIST ' + file
379 cmd = 'LIST ' + file
385 else:
380 else:
386 cmd = 'LIST'
381 cmd = 'LIST'
387 conn = self.ftp.ntransfercmd(cmd)
382 conn = self.ftp.ntransfercmd(cmd)
388 self.busy = 1
383 self.busy = 1
389 # Pass back both a suitably decorated object and a retrieval length
384 # Pass back both a suitably decorated object and a retrieval length
390 return (addclosehook(conn[0].makefile('rb'),
385 return (addclosehook(conn[0].makefile('rb'),
391 self.endtransfer), conn[1])
386 self.endtransfer), conn[1])
392
387
393
388
394 ####################################################################
389 ####################################################################
395 # Range Tuple Functions
390 # Range Tuple Functions
396 # XXX: These range tuple functions might go better in a class.
391 # XXX: These range tuple functions might go better in a class.
397
392
398 _rangere = None
393 _rangere = None
399 def range_header_to_tuple(range_header):
394 def range_header_to_tuple(range_header):
400 """Get a (firstbyte,lastbyte) tuple from a Range header value.
395 """Get a (firstbyte,lastbyte) tuple from a Range header value.
401
396
402 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
397 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
403 function pulls the firstbyte and lastbyte values and returns
398 function pulls the firstbyte and lastbyte values and returns
404 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
399 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
405 the header value, it is returned as an empty string in the
400 the header value, it is returned as an empty string in the
406 tuple.
401 tuple.
407
402
408 Return None if range_header is None
403 Return None if range_header is None
409 Return () if range_header does not conform to the range spec
404 Return () if range_header does not conform to the range spec
410 pattern.
405 pattern.
411
406
412 """
407 """
413 global _rangere
408 global _rangere
414 if range_header is None:
409 if range_header is None:
415 return None
410 return None
416 if _rangere is None:
411 if _rangere is None:
417 import re
412 import re
418 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
413 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
419 match = _rangere.match(range_header)
414 match = _rangere.match(range_header)
420 if match:
415 if match:
421 tup = range_tuple_normalize(match.group(1, 2))
416 tup = range_tuple_normalize(match.group(1, 2))
422 if tup and tup[1]:
417 if tup and tup[1]:
423 tup = (tup[0], tup[1]+1)
418 tup = (tup[0], tup[1]+1)
424 return tup
419 return tup
425 return ()
420 return ()
426
421
427 def range_tuple_to_header(range_tup):
422 def range_tuple_to_header(range_tup):
428 """Convert a range tuple to a Range header value.
423 """Convert a range tuple to a Range header value.
429 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
424 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
430 if no range is needed.
425 if no range is needed.
431 """
426 """
432 if range_tup is None:
427 if range_tup is None:
433 return None
428 return None
434 range_tup = range_tuple_normalize(range_tup)
429 range_tup = range_tuple_normalize(range_tup)
435 if range_tup:
430 if range_tup:
436 if range_tup[1]:
431 if range_tup[1]:
437 range_tup = (range_tup[0], range_tup[1] - 1)
432 range_tup = (range_tup[0], range_tup[1] - 1)
438 return 'bytes=%s-%s' % range_tup
433 return 'bytes=%s-%s' % range_tup
439
434
440 def range_tuple_normalize(range_tup):
435 def range_tuple_normalize(range_tup):
441 """Normalize a (first_byte,last_byte) range tuple.
436 """Normalize a (first_byte,last_byte) range tuple.
442 Return a tuple whose first element is guaranteed to be an int
437 Return a tuple whose first element is guaranteed to be an int
443 and whose second element will be '' (meaning: the last byte) or
438 and whose second element will be '' (meaning: the last byte) or
444 an int. Finally, return None if the normalized tuple == (0,'')
439 an int. Finally, return None if the normalized tuple == (0,'')
445 as that is equivalent to retrieving the entire file.
440 as that is equivalent to retrieving the entire file.
446 """
441 """
447 if range_tup is None:
442 if range_tup is None:
448 return None
443 return None
449 # handle first byte
444 # handle first byte
450 fb = range_tup[0]
445 fb = range_tup[0]
451 if fb in (None, ''):
446 if fb in (None, ''):
452 fb = 0
447 fb = 0
453 else:
448 else:
454 fb = int(fb)
449 fb = int(fb)
455 # handle last byte
450 # handle last byte
456 try:
451 try:
457 lb = range_tup[1]
452 lb = range_tup[1]
458 except IndexError:
453 except IndexError:
459 lb = ''
454 lb = ''
460 else:
455 else:
461 if lb is None:
456 if lb is None:
462 lb = ''
457 lb = ''
463 elif lb != '':
458 elif lb != '':
464 lb = int(lb)
459 lb = int(lb)
465 # check if range is over the entire file
460 # check if range is over the entire file
466 if (fb, lb) == (0, ''):
461 if (fb, lb) == (0, ''):
467 return None
462 return None
468 # check that the range is valid
463 # check that the range is valid
469 if lb < fb:
464 if lb < fb:
470 raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
465 raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
471 return (fb, lb)
466 return (fb, lb)
@@ -1,287 +1,286 b''
1 # hgweb/hgweb_mod.py - Web interface for a repository.
1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import os
9 import os
10 from mercurial import ui, hg, hook, error, encoding, templater
10 from mercurial import ui, hg, hook, error, encoding, templater
11 from common import get_mtime, ErrorResponse, permhooks
11 from common import get_mtime, ErrorResponse, permhooks
12 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
12 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
13 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
14 from request import wsgirequest
13 from request import wsgirequest
15 import webcommands, protocol, webutil
14 import webcommands, protocol, webutil
16
15
17 perms = {
16 perms = {
18 'changegroup': 'pull',
17 'changegroup': 'pull',
19 'changegroupsubset': 'pull',
18 'changegroupsubset': 'pull',
20 'unbundle': 'push',
19 'unbundle': 'push',
21 'stream_out': 'pull',
20 'stream_out': 'pull',
22 }
21 }
23
22
24 class hgweb(object):
23 class hgweb(object):
25 def __init__(self, repo, name=None):
24 def __init__(self, repo, name=None):
26 if isinstance(repo, str):
25 if isinstance(repo, str):
27 u = ui.ui()
26 u = ui.ui()
28 u.setconfig('ui', 'report_untrusted', 'off')
27 u.setconfig('ui', 'report_untrusted', 'off')
29 u.setconfig('ui', 'interactive', 'off')
28 u.setconfig('ui', 'interactive', 'off')
30 self.repo = hg.repository(u, repo)
29 self.repo = hg.repository(u, repo)
31 else:
30 else:
32 self.repo = repo
31 self.repo = repo
33
32
34 hook.redirect(True)
33 hook.redirect(True)
35 self.mtime = -1
34 self.mtime = -1
36 self.reponame = name
35 self.reponame = name
37 self.archives = 'zip', 'gz', 'bz2'
36 self.archives = 'zip', 'gz', 'bz2'
38 self.stripecount = 1
37 self.stripecount = 1
39 # a repo owner may set web.templates in .hg/hgrc to get any file
38 # a repo owner may set web.templates in .hg/hgrc to get any file
40 # readable by the user running the CGI script
39 # readable by the user running the CGI script
41 self.templatepath = self.config('web', 'templates')
40 self.templatepath = self.config('web', 'templates')
42
41
43 # The CGI scripts are often run by a user different from the repo owner.
42 # The CGI scripts are often run by a user different from the repo owner.
44 # Trust the settings from the .hg/hgrc files by default.
43 # Trust the settings from the .hg/hgrc files by default.
45 def config(self, section, name, default=None, untrusted=True):
44 def config(self, section, name, default=None, untrusted=True):
46 return self.repo.ui.config(section, name, default,
45 return self.repo.ui.config(section, name, default,
47 untrusted=untrusted)
46 untrusted=untrusted)
48
47
49 def configbool(self, section, name, default=False, untrusted=True):
48 def configbool(self, section, name, default=False, untrusted=True):
50 return self.repo.ui.configbool(section, name, default,
49 return self.repo.ui.configbool(section, name, default,
51 untrusted=untrusted)
50 untrusted=untrusted)
52
51
53 def configlist(self, section, name, default=None, untrusted=True):
52 def configlist(self, section, name, default=None, untrusted=True):
54 return self.repo.ui.configlist(section, name, default,
53 return self.repo.ui.configlist(section, name, default,
55 untrusted=untrusted)
54 untrusted=untrusted)
56
55
57 def refresh(self, request=None):
56 def refresh(self, request=None):
58 if request:
57 if request:
59 self.repo.ui.environ = request.env
58 self.repo.ui.environ = request.env
60 mtime = get_mtime(self.repo.spath)
59 mtime = get_mtime(self.repo.spath)
61 if mtime != self.mtime:
60 if mtime != self.mtime:
62 self.mtime = mtime
61 self.mtime = mtime
63 self.repo = hg.repository(self.repo.ui, self.repo.root)
62 self.repo = hg.repository(self.repo.ui, self.repo.root)
64 self.maxchanges = int(self.config("web", "maxchanges", 10))
63 self.maxchanges = int(self.config("web", "maxchanges", 10))
65 self.stripecount = int(self.config("web", "stripes", 1))
64 self.stripecount = int(self.config("web", "stripes", 1))
66 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
65 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
67 self.maxfiles = int(self.config("web", "maxfiles", 10))
66 self.maxfiles = int(self.config("web", "maxfiles", 10))
68 self.allowpull = self.configbool("web", "allowpull", True)
67 self.allowpull = self.configbool("web", "allowpull", True)
69 encoding.encoding = self.config("web", "encoding",
68 encoding.encoding = self.config("web", "encoding",
70 encoding.encoding)
69 encoding.encoding)
71
70
72 def run(self):
71 def run(self):
73 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
72 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
74 raise RuntimeError("This function is only intended to be "
73 raise RuntimeError("This function is only intended to be "
75 "called while running as a CGI script.")
74 "called while running as a CGI script.")
76 import mercurial.hgweb.wsgicgi as wsgicgi
75 import mercurial.hgweb.wsgicgi as wsgicgi
77 wsgicgi.launch(self)
76 wsgicgi.launch(self)
78
77
79 def __call__(self, env, respond):
78 def __call__(self, env, respond):
80 req = wsgirequest(env, respond)
79 req = wsgirequest(env, respond)
81 return self.run_wsgi(req)
80 return self.run_wsgi(req)
82
81
83 def run_wsgi(self, req):
82 def run_wsgi(self, req):
84
83
85 self.refresh(req)
84 self.refresh(req)
86
85
87 # work with CGI variables to create coherent structure
86 # work with CGI variables to create coherent structure
88 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
87 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
89
88
90 req.url = req.env['SCRIPT_NAME']
89 req.url = req.env['SCRIPT_NAME']
91 if not req.url.endswith('/'):
90 if not req.url.endswith('/'):
92 req.url += '/'
91 req.url += '/'
93 if 'REPO_NAME' in req.env:
92 if 'REPO_NAME' in req.env:
94 req.url += req.env['REPO_NAME'] + '/'
93 req.url += req.env['REPO_NAME'] + '/'
95
94
96 if 'PATH_INFO' in req.env:
95 if 'PATH_INFO' in req.env:
97 parts = req.env['PATH_INFO'].strip('/').split('/')
96 parts = req.env['PATH_INFO'].strip('/').split('/')
98 repo_parts = req.env.get('REPO_NAME', '').split('/')
97 repo_parts = req.env.get('REPO_NAME', '').split('/')
99 if parts[:len(repo_parts)] == repo_parts:
98 if parts[:len(repo_parts)] == repo_parts:
100 parts = parts[len(repo_parts):]
99 parts = parts[len(repo_parts):]
101 query = '/'.join(parts)
100 query = '/'.join(parts)
102 else:
101 else:
103 query = req.env['QUERY_STRING'].split('&', 1)[0]
102 query = req.env['QUERY_STRING'].split('&', 1)[0]
104 query = query.split(';', 1)[0]
103 query = query.split(';', 1)[0]
105
104
106 # process this if it's a protocol request
105 # process this if it's a protocol request
107 # protocol bits don't need to create any URLs
106 # protocol bits don't need to create any URLs
108 # and the clients always use the old URL structure
107 # and the clients always use the old URL structure
109
108
110 cmd = req.form.get('cmd', [''])[0]
109 cmd = req.form.get('cmd', [''])[0]
111 if cmd and cmd in protocol.__all__:
110 if cmd and cmd in protocol.__all__:
112 if query:
111 if query:
113 raise ErrorResponse(HTTP_NOT_FOUND)
112 raise ErrorResponse(HTTP_NOT_FOUND)
114 try:
113 try:
115 if cmd in perms:
114 if cmd in perms:
116 try:
115 try:
117 self.check_perm(req, perms[cmd])
116 self.check_perm(req, perms[cmd])
118 except ErrorResponse, inst:
117 except ErrorResponse, inst:
119 if cmd == 'unbundle':
118 if cmd == 'unbundle':
120 req.drain()
119 req.drain()
121 raise
120 raise
122 method = getattr(protocol, cmd)
121 method = getattr(protocol, cmd)
123 return method(self.repo, req)
122 return method(self.repo, req)
124 except ErrorResponse, inst:
123 except ErrorResponse, inst:
125 req.respond(inst, protocol.HGTYPE)
124 req.respond(inst, protocol.HGTYPE)
126 if not inst.message:
125 if not inst.message:
127 return []
126 return []
128 return '0\n%s\n' % inst.message,
127 return '0\n%s\n' % inst.message,
129
128
130 # translate user-visible url structure to internal structure
129 # translate user-visible url structure to internal structure
131
130
132 args = query.split('/', 2)
131 args = query.split('/', 2)
133 if 'cmd' not in req.form and args and args[0]:
132 if 'cmd' not in req.form and args and args[0]:
134
133
135 cmd = args.pop(0)
134 cmd = args.pop(0)
136 style = cmd.rfind('-')
135 style = cmd.rfind('-')
137 if style != -1:
136 if style != -1:
138 req.form['style'] = [cmd[:style]]
137 req.form['style'] = [cmd[:style]]
139 cmd = cmd[style + 1:]
138 cmd = cmd[style + 1:]
140
139
141 # avoid accepting e.g. style parameter as command
140 # avoid accepting e.g. style parameter as command
142 if hasattr(webcommands, cmd):
141 if hasattr(webcommands, cmd):
143 req.form['cmd'] = [cmd]
142 req.form['cmd'] = [cmd]
144 else:
143 else:
145 cmd = ''
144 cmd = ''
146
145
147 if cmd == 'static':
146 if cmd == 'static':
148 req.form['file'] = ['/'.join(args)]
147 req.form['file'] = ['/'.join(args)]
149 else:
148 else:
150 if args and args[0]:
149 if args and args[0]:
151 node = args.pop(0)
150 node = args.pop(0)
152 req.form['node'] = [node]
151 req.form['node'] = [node]
153 if args:
152 if args:
154 req.form['file'] = args
153 req.form['file'] = args
155
154
156 ua = req.env.get('HTTP_USER_AGENT', '')
155 ua = req.env.get('HTTP_USER_AGENT', '')
157 if cmd == 'rev' and 'mercurial' in ua:
156 if cmd == 'rev' and 'mercurial' in ua:
158 req.form['style'] = ['raw']
157 req.form['style'] = ['raw']
159
158
160 if cmd == 'archive':
159 if cmd == 'archive':
161 fn = req.form['node'][0]
160 fn = req.form['node'][0]
162 for type_, spec in self.archive_specs.iteritems():
161 for type_, spec in self.archive_specs.iteritems():
163 ext = spec[2]
162 ext = spec[2]
164 if fn.endswith(ext):
163 if fn.endswith(ext):
165 req.form['node'] = [fn[:-len(ext)]]
164 req.form['node'] = [fn[:-len(ext)]]
166 req.form['type'] = [type_]
165 req.form['type'] = [type_]
167
166
168 # process the web interface request
167 # process the web interface request
169
168
170 try:
169 try:
171 tmpl = self.templater(req)
170 tmpl = self.templater(req)
172 ctype = tmpl('mimetype', encoding=encoding.encoding)
171 ctype = tmpl('mimetype', encoding=encoding.encoding)
173 ctype = templater.stringify(ctype)
172 ctype = templater.stringify(ctype)
174
173
175 # check read permissions on non-static content
174 # check read permissions on non-static content
176 if cmd != 'static':
175 if cmd != 'static':
177 self.check_perm(req, None)
176 self.check_perm(req, None)
178
177
179 if cmd == '':
178 if cmd == '':
180 req.form['cmd'] = [tmpl.cache['default']]
179 req.form['cmd'] = [tmpl.cache['default']]
181 cmd = req.form['cmd'][0]
180 cmd = req.form['cmd'][0]
182
181
183 if cmd not in webcommands.__all__:
182 if cmd not in webcommands.__all__:
184 msg = 'no such method: %s' % cmd
183 msg = 'no such method: %s' % cmd
185 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
184 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
186 elif cmd == 'file' and 'raw' in req.form.get('style', []):
185 elif cmd == 'file' and 'raw' in req.form.get('style', []):
187 self.ctype = ctype
186 self.ctype = ctype
188 content = webcommands.rawfile(self, req, tmpl)
187 content = webcommands.rawfile(self, req, tmpl)
189 else:
188 else:
190 content = getattr(webcommands, cmd)(self, req, tmpl)
189 content = getattr(webcommands, cmd)(self, req, tmpl)
191 req.respond(HTTP_OK, ctype)
190 req.respond(HTTP_OK, ctype)
192
191
193 return content
192 return content
194
193
195 except error.LookupError, err:
194 except error.LookupError, err:
196 req.respond(HTTP_NOT_FOUND, ctype)
195 req.respond(HTTP_NOT_FOUND, ctype)
197 msg = str(err)
196 msg = str(err)
198 if 'manifest' not in msg:
197 if 'manifest' not in msg:
199 msg = 'revision not found: %s' % err.name
198 msg = 'revision not found: %s' % err.name
200 return tmpl('error', error=msg)
199 return tmpl('error', error=msg)
201 except (error.RepoError, error.RevlogError), inst:
200 except (error.RepoError, error.RevlogError), inst:
202 req.respond(HTTP_SERVER_ERROR, ctype)
201 req.respond(HTTP_SERVER_ERROR, ctype)
203 return tmpl('error', error=str(inst))
202 return tmpl('error', error=str(inst))
204 except ErrorResponse, inst:
203 except ErrorResponse, inst:
205 req.respond(inst, ctype)
204 req.respond(inst, ctype)
206 return tmpl('error', error=inst.message)
205 return tmpl('error', error=inst.message)
207
206
208 def templater(self, req):
207 def templater(self, req):
209
208
210 # determine scheme, port and server name
209 # determine scheme, port and server name
211 # this is needed to create absolute urls
210 # this is needed to create absolute urls
212
211
213 proto = req.env.get('wsgi.url_scheme')
212 proto = req.env.get('wsgi.url_scheme')
214 if proto == 'https':
213 if proto == 'https':
215 proto = 'https'
214 proto = 'https'
216 default_port = "443"
215 default_port = "443"
217 else:
216 else:
218 proto = 'http'
217 proto = 'http'
219 default_port = "80"
218 default_port = "80"
220
219
221 port = req.env["SERVER_PORT"]
220 port = req.env["SERVER_PORT"]
222 port = port != default_port and (":" + port) or ""
221 port = port != default_port and (":" + port) or ""
223 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
222 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
224 staticurl = self.config("web", "staticurl") or req.url + 'static/'
223 staticurl = self.config("web", "staticurl") or req.url + 'static/'
225 if not staticurl.endswith('/'):
224 if not staticurl.endswith('/'):
226 staticurl += '/'
225 staticurl += '/'
227
226
228 # some functions for the templater
227 # some functions for the templater
229
228
230 def header(**map):
229 def header(**map):
231 yield tmpl('header', encoding=encoding.encoding, **map)
230 yield tmpl('header', encoding=encoding.encoding, **map)
232
231
233 def footer(**map):
232 def footer(**map):
234 yield tmpl("footer", **map)
233 yield tmpl("footer", **map)
235
234
236 def motd(**map):
235 def motd(**map):
237 yield self.config("web", "motd", "")
236 yield self.config("web", "motd", "")
238
237
239 # figure out which style to use
238 # figure out which style to use
240
239
241 vars = {}
240 vars = {}
242 styles = (
241 styles = (
243 req.form.get('style', [None])[0],
242 req.form.get('style', [None])[0],
244 self.config('web', 'style'),
243 self.config('web', 'style'),
245 'paper',
244 'paper',
246 )
245 )
247 style, mapfile = templater.stylemap(styles, self.templatepath)
246 style, mapfile = templater.stylemap(styles, self.templatepath)
248 if style == styles[0]:
247 if style == styles[0]:
249 vars['style'] = style
248 vars['style'] = style
250
249
251 start = req.url[-1] == '?' and '&' or '?'
250 start = req.url[-1] == '?' and '&' or '?'
252 sessionvars = webutil.sessionvars(vars, start)
251 sessionvars = webutil.sessionvars(vars, start)
253
252
254 if not self.reponame:
253 if not self.reponame:
255 self.reponame = (self.config("web", "name")
254 self.reponame = (self.config("web", "name")
256 or req.env.get('REPO_NAME')
255 or req.env.get('REPO_NAME')
257 or req.url.strip('/') or self.repo.root)
256 or req.url.strip('/') or self.repo.root)
258
257
259 # create the templater
258 # create the templater
260
259
261 tmpl = templater.templater(mapfile,
260 tmpl = templater.templater(mapfile,
262 defaults={"url": req.url,
261 defaults={"url": req.url,
263 "staticurl": staticurl,
262 "staticurl": staticurl,
264 "urlbase": urlbase,
263 "urlbase": urlbase,
265 "repo": self.reponame,
264 "repo": self.reponame,
266 "header": header,
265 "header": header,
267 "footer": footer,
266 "footer": footer,
268 "motd": motd,
267 "motd": motd,
269 "sessionvars": sessionvars
268 "sessionvars": sessionvars
270 })
269 })
271 return tmpl
270 return tmpl
272
271
273 def archivelist(self, nodeid):
272 def archivelist(self, nodeid):
274 allowed = self.configlist("web", "allow_archive")
273 allowed = self.configlist("web", "allow_archive")
275 for i, spec in self.archive_specs.iteritems():
274 for i, spec in self.archive_specs.iteritems():
276 if i in allowed or self.configbool("web", "allow" + i):
275 if i in allowed or self.configbool("web", "allow" + i):
277 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
276 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
278
277
279 archive_specs = {
278 archive_specs = {
280 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
279 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
281 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
280 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
282 'zip': ('application/zip', 'zip', '.zip', None),
281 'zip': ('application/zip', 'zip', '.zip', None),
283 }
282 }
284
283
285 def check_perm(self, req, op):
284 def check_perm(self, req, op):
286 for hook in permhooks:
285 for hook in permhooks:
287 hook(self, req, op)
286 hook(self, req, op)
@@ -1,277 +1,277 b''
1 # hgweb/server.py - The standalone hg web server.
1 # hgweb/server.py - The standalone hg web server.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 from mercurial import hg, util, error
10 from mercurial import util, error
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 def _splitURI(uri):
13 def _splitURI(uri):
14 """ Return path and query splited from uri
14 """ Return path and query splited from uri
15
15
16 Just like CGI environment, the path is unquoted, the query is
16 Just like CGI environment, the path is unquoted, the query is
17 not.
17 not.
18 """
18 """
19 if '?' in uri:
19 if '?' in uri:
20 path, query = uri.split('?', 1)
20 path, query = uri.split('?', 1)
21 else:
21 else:
22 path, query = uri, ''
22 path, query = uri, ''
23 return urllib.unquote(path), query
23 return urllib.unquote(path), query
24
24
25 class _error_logger(object):
25 class _error_logger(object):
26 def __init__(self, handler):
26 def __init__(self, handler):
27 self.handler = handler
27 self.handler = handler
28 def flush(self):
28 def flush(self):
29 pass
29 pass
30 def write(self, str):
30 def write(self, str):
31 self.writelines(str.split('\n'))
31 self.writelines(str.split('\n'))
32 def writelines(self, seq):
32 def writelines(self, seq):
33 for msg in seq:
33 for msg in seq:
34 self.handler.log_error("HG error: %s", msg)
34 self.handler.log_error("HG error: %s", msg)
35
35
36 class _hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
36 class _hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
37
37
38 url_scheme = 'http'
38 url_scheme = 'http'
39
39
40 def __init__(self, *args, **kargs):
40 def __init__(self, *args, **kargs):
41 self.protocol_version = 'HTTP/1.1'
41 self.protocol_version = 'HTTP/1.1'
42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
43
43
44 def _log_any(self, fp, format, *args):
44 def _log_any(self, fp, format, *args):
45 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
45 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
46 self.log_date_time_string(),
46 self.log_date_time_string(),
47 format % args))
47 format % args))
48 fp.flush()
48 fp.flush()
49
49
50 def log_error(self, format, *args):
50 def log_error(self, format, *args):
51 self._log_any(self.server.errorlog, format, *args)
51 self._log_any(self.server.errorlog, format, *args)
52
52
53 def log_message(self, format, *args):
53 def log_message(self, format, *args):
54 self._log_any(self.server.accesslog, format, *args)
54 self._log_any(self.server.accesslog, format, *args)
55
55
56 def do_write(self):
56 def do_write(self):
57 try:
57 try:
58 self.do_hgweb()
58 self.do_hgweb()
59 except socket.error, inst:
59 except socket.error, inst:
60 if inst[0] != errno.EPIPE:
60 if inst[0] != errno.EPIPE:
61 raise
61 raise
62
62
63 def do_POST(self):
63 def do_POST(self):
64 try:
64 try:
65 self.do_write()
65 self.do_write()
66 except StandardError:
66 except StandardError:
67 self._start_response("500 Internal Server Error", [])
67 self._start_response("500 Internal Server Error", [])
68 self._write("Internal Server Error")
68 self._write("Internal Server Error")
69 tb = "".join(traceback.format_exception(*sys.exc_info()))
69 tb = "".join(traceback.format_exception(*sys.exc_info()))
70 self.log_error("Exception happened during processing "
70 self.log_error("Exception happened during processing "
71 "request '%s':\n%s", self.path, tb)
71 "request '%s':\n%s", self.path, tb)
72
72
73 def do_GET(self):
73 def do_GET(self):
74 self.do_POST()
74 self.do_POST()
75
75
76 def do_hgweb(self):
76 def do_hgweb(self):
77 path, query = _splitURI(self.path)
77 path, query = _splitURI(self.path)
78
78
79 env = {}
79 env = {}
80 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
80 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
81 env['REQUEST_METHOD'] = self.command
81 env['REQUEST_METHOD'] = self.command
82 env['SERVER_NAME'] = self.server.server_name
82 env['SERVER_NAME'] = self.server.server_name
83 env['SERVER_PORT'] = str(self.server.server_port)
83 env['SERVER_PORT'] = str(self.server.server_port)
84 env['REQUEST_URI'] = self.path
84 env['REQUEST_URI'] = self.path
85 env['SCRIPT_NAME'] = self.server.prefix
85 env['SCRIPT_NAME'] = self.server.prefix
86 env['PATH_INFO'] = path[len(self.server.prefix):]
86 env['PATH_INFO'] = path[len(self.server.prefix):]
87 env['REMOTE_HOST'] = self.client_address[0]
87 env['REMOTE_HOST'] = self.client_address[0]
88 env['REMOTE_ADDR'] = self.client_address[0]
88 env['REMOTE_ADDR'] = self.client_address[0]
89 if query:
89 if query:
90 env['QUERY_STRING'] = query
90 env['QUERY_STRING'] = query
91
91
92 if self.headers.typeheader is None:
92 if self.headers.typeheader is None:
93 env['CONTENT_TYPE'] = self.headers.type
93 env['CONTENT_TYPE'] = self.headers.type
94 else:
94 else:
95 env['CONTENT_TYPE'] = self.headers.typeheader
95 env['CONTENT_TYPE'] = self.headers.typeheader
96 length = self.headers.getheader('content-length')
96 length = self.headers.getheader('content-length')
97 if length:
97 if length:
98 env['CONTENT_LENGTH'] = length
98 env['CONTENT_LENGTH'] = length
99 for header in [h for h in self.headers.keys()
99 for header in [h for h in self.headers.keys()
100 if h not in ('content-type', 'content-length')]:
100 if h not in ('content-type', 'content-length')]:
101 hkey = 'HTTP_' + header.replace('-', '_').upper()
101 hkey = 'HTTP_' + header.replace('-', '_').upper()
102 hval = self.headers.getheader(header)
102 hval = self.headers.getheader(header)
103 hval = hval.replace('\n', '').strip()
103 hval = hval.replace('\n', '').strip()
104 if hval:
104 if hval:
105 env[hkey] = hval
105 env[hkey] = hval
106 env['SERVER_PROTOCOL'] = self.request_version
106 env['SERVER_PROTOCOL'] = self.request_version
107 env['wsgi.version'] = (1, 0)
107 env['wsgi.version'] = (1, 0)
108 env['wsgi.url_scheme'] = self.url_scheme
108 env['wsgi.url_scheme'] = self.url_scheme
109 env['wsgi.input'] = self.rfile
109 env['wsgi.input'] = self.rfile
110 env['wsgi.errors'] = _error_logger(self)
110 env['wsgi.errors'] = _error_logger(self)
111 env['wsgi.multithread'] = isinstance(self.server,
111 env['wsgi.multithread'] = isinstance(self.server,
112 SocketServer.ThreadingMixIn)
112 SocketServer.ThreadingMixIn)
113 env['wsgi.multiprocess'] = isinstance(self.server,
113 env['wsgi.multiprocess'] = isinstance(self.server,
114 SocketServer.ForkingMixIn)
114 SocketServer.ForkingMixIn)
115 env['wsgi.run_once'] = 0
115 env['wsgi.run_once'] = 0
116
116
117 self.close_connection = True
117 self.close_connection = True
118 self.saved_status = None
118 self.saved_status = None
119 self.saved_headers = []
119 self.saved_headers = []
120 self.sent_headers = False
120 self.sent_headers = False
121 self.length = None
121 self.length = None
122 for chunk in self.server.application(env, self._start_response):
122 for chunk in self.server.application(env, self._start_response):
123 self._write(chunk)
123 self._write(chunk)
124
124
125 def send_headers(self):
125 def send_headers(self):
126 if not self.saved_status:
126 if not self.saved_status:
127 raise AssertionError("Sending headers before "
127 raise AssertionError("Sending headers before "
128 "start_response() called")
128 "start_response() called")
129 saved_status = self.saved_status.split(None, 1)
129 saved_status = self.saved_status.split(None, 1)
130 saved_status[0] = int(saved_status[0])
130 saved_status[0] = int(saved_status[0])
131 self.send_response(*saved_status)
131 self.send_response(*saved_status)
132 should_close = True
132 should_close = True
133 for h in self.saved_headers:
133 for h in self.saved_headers:
134 self.send_header(*h)
134 self.send_header(*h)
135 if h[0].lower() == 'content-length':
135 if h[0].lower() == 'content-length':
136 should_close = False
136 should_close = False
137 self.length = int(h[1])
137 self.length = int(h[1])
138 # The value of the Connection header is a list of case-insensitive
138 # The value of the Connection header is a list of case-insensitive
139 # tokens separated by commas and optional whitespace.
139 # tokens separated by commas and optional whitespace.
140 if 'close' in [token.strip().lower() for token in
140 if 'close' in [token.strip().lower() for token in
141 self.headers.get('connection', '').split(',')]:
141 self.headers.get('connection', '').split(',')]:
142 should_close = True
142 should_close = True
143 if should_close:
143 if should_close:
144 self.send_header('Connection', 'close')
144 self.send_header('Connection', 'close')
145 self.close_connection = should_close
145 self.close_connection = should_close
146 self.end_headers()
146 self.end_headers()
147 self.sent_headers = True
147 self.sent_headers = True
148
148
149 def _start_response(self, http_status, headers, exc_info=None):
149 def _start_response(self, http_status, headers, exc_info=None):
150 code, msg = http_status.split(None, 1)
150 code, msg = http_status.split(None, 1)
151 code = int(code)
151 code = int(code)
152 self.saved_status = http_status
152 self.saved_status = http_status
153 bad_headers = ('connection', 'transfer-encoding')
153 bad_headers = ('connection', 'transfer-encoding')
154 self.saved_headers = [h for h in headers
154 self.saved_headers = [h for h in headers
155 if h[0].lower() not in bad_headers]
155 if h[0].lower() not in bad_headers]
156 return self._write
156 return self._write
157
157
158 def _write(self, data):
158 def _write(self, data):
159 if not self.saved_status:
159 if not self.saved_status:
160 raise AssertionError("data written before start_response() called")
160 raise AssertionError("data written before start_response() called")
161 elif not self.sent_headers:
161 elif not self.sent_headers:
162 self.send_headers()
162 self.send_headers()
163 if self.length is not None:
163 if self.length is not None:
164 if len(data) > self.length:
164 if len(data) > self.length:
165 raise AssertionError("Content-length header sent, but more "
165 raise AssertionError("Content-length header sent, but more "
166 "bytes than specified are being written.")
166 "bytes than specified are being written.")
167 self.length = self.length - len(data)
167 self.length = self.length - len(data)
168 self.wfile.write(data)
168 self.wfile.write(data)
169 self.wfile.flush()
169 self.wfile.flush()
170
170
171 class _shgwebhandler(_hgwebhandler):
171 class _shgwebhandler(_hgwebhandler):
172
172
173 url_scheme = 'https'
173 url_scheme = 'https'
174
174
175 def setup(self):
175 def setup(self):
176 self.connection = self.request
176 self.connection = self.request
177 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
177 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
178 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
178 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
179
179
180 def do_write(self):
180 def do_write(self):
181 from OpenSSL.SSL import SysCallError
181 from OpenSSL.SSL import SysCallError
182 try:
182 try:
183 super(_shgwebhandler, self).do_write()
183 super(_shgwebhandler, self).do_write()
184 except SysCallError, inst:
184 except SysCallError, inst:
185 if inst.args[0] != errno.EPIPE:
185 if inst.args[0] != errno.EPIPE:
186 raise
186 raise
187
187
188 def handle_one_request(self):
188 def handle_one_request(self):
189 from OpenSSL.SSL import SysCallError, ZeroReturnError
189 from OpenSSL.SSL import SysCallError, ZeroReturnError
190 try:
190 try:
191 super(_shgwebhandler, self).handle_one_request()
191 super(_shgwebhandler, self).handle_one_request()
192 except (SysCallError, ZeroReturnError):
192 except (SysCallError, ZeroReturnError):
193 self.close_connection = True
193 self.close_connection = True
194 pass
194 pass
195
195
196 try:
196 try:
197 from threading import activeCount
197 from threading import activeCount
198 _mixin = SocketServer.ThreadingMixIn
198 _mixin = SocketServer.ThreadingMixIn
199 except ImportError:
199 except ImportError:
200 if hasattr(os, "fork"):
200 if hasattr(os, "fork"):
201 _mixin = SocketServer.ForkingMixIn
201 _mixin = SocketServer.ForkingMixIn
202 else:
202 else:
203 class _mixin:
203 class _mixin:
204 pass
204 pass
205
205
206 def openlog(opt, default):
206 def openlog(opt, default):
207 if opt and opt != '-':
207 if opt and opt != '-':
208 return open(opt, 'a')
208 return open(opt, 'a')
209 return default
209 return default
210
210
211 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
211 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
212
212
213 # SO_REUSEADDR has broken semantics on windows
213 # SO_REUSEADDR has broken semantics on windows
214 if os.name == 'nt':
214 if os.name == 'nt':
215 allow_reuse_address = 0
215 allow_reuse_address = 0
216
216
217 def __init__(self, ui, app, addr, handler, **kwargs):
217 def __init__(self, ui, app, addr, handler, **kwargs):
218 BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs)
218 BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs)
219 self.daemon_threads = True
219 self.daemon_threads = True
220 self.application = app
220 self.application = app
221
221
222 ssl_cert = ui.config('web', 'certificate')
222 ssl_cert = ui.config('web', 'certificate')
223 if ssl_cert:
223 if ssl_cert:
224 try:
224 try:
225 from OpenSSL import SSL
225 from OpenSSL import SSL
226 ctx = SSL.Context(SSL.SSLv23_METHOD)
226 ctx = SSL.Context(SSL.SSLv23_METHOD)
227 except ImportError:
227 except ImportError:
228 raise util.Abort(_("SSL support is unavailable"))
228 raise util.Abort(_("SSL support is unavailable"))
229 ctx.use_privatekey_file(ssl_cert)
229 ctx.use_privatekey_file(ssl_cert)
230 ctx.use_certificate_file(ssl_cert)
230 ctx.use_certificate_file(ssl_cert)
231 sock = socket.socket(self.address_family, self.socket_type)
231 sock = socket.socket(self.address_family, self.socket_type)
232 self.socket = SSL.Connection(ctx, sock)
232 self.socket = SSL.Connection(ctx, sock)
233 self.server_bind()
233 self.server_bind()
234 self.server_activate()
234 self.server_activate()
235
235
236 prefix = ui.config('web', 'prefix', '')
236 prefix = ui.config('web', 'prefix', '')
237 if prefix:
237 if prefix:
238 prefix = '/' + prefix.strip('/')
238 prefix = '/' + prefix.strip('/')
239 self.prefix = prefix
239 self.prefix = prefix
240
240
241 alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
241 alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
242 elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
242 elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
243 self.accesslog = alog
243 self.accesslog = alog
244 self.errorlog = elog
244 self.errorlog = elog
245
245
246 self.addr, self.port = self.socket.getsockname()[0:2]
246 self.addr, self.port = self.socket.getsockname()[0:2]
247 self.fqaddr = socket.getfqdn(addr[0])
247 self.fqaddr = socket.getfqdn(addr[0])
248
248
249 class IPv6HTTPServer(MercurialHTTPServer):
249 class IPv6HTTPServer(MercurialHTTPServer):
250 address_family = getattr(socket, 'AF_INET6', None)
250 address_family = getattr(socket, 'AF_INET6', None)
251 def __init__(self, *args, **kwargs):
251 def __init__(self, *args, **kwargs):
252 if self.address_family is None:
252 if self.address_family is None:
253 raise error.RepoError(_('IPv6 is not available on this system'))
253 raise error.RepoError(_('IPv6 is not available on this system'))
254 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
254 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
255
255
256 def create_server(ui, app):
256 def create_server(ui, app):
257
257
258 if ui.config('web', 'certificate'):
258 if ui.config('web', 'certificate'):
259 handler = _shgwebhandler
259 handler = _shgwebhandler
260 else:
260 else:
261 handler = _hgwebhandler
261 handler = _hgwebhandler
262
262
263 if ui.configbool('web', 'ipv6'):
263 if ui.configbool('web', 'ipv6'):
264 cls = IPv6HTTPServer
264 cls = IPv6HTTPServer
265 else:
265 else:
266 cls = MercurialHTTPServer
266 cls = MercurialHTTPServer
267
267
268 # ugly hack due to python issue5853 (for threaded use)
268 # ugly hack due to python issue5853 (for threaded use)
269 import mimetypes; mimetypes.init()
269 import mimetypes; mimetypes.init()
270
270
271 address = ui.config('web', 'address', '')
271 address = ui.config('web', 'address', '')
272 port = int(ui.config('web', 'port', 8000))
272 port = int(ui.config('web', 'port', 8000))
273 try:
273 try:
274 return cls(ui, app, (address, port), handler)
274 return cls(ui, app, (address, port), handler)
275 except socket.error, inst:
275 except socket.error, inst:
276 raise util.Abort(_("cannot start server at '%s:%d': %s")
276 raise util.Abort(_("cannot start server at '%s:%d': %s")
277 % (address, port, inst.args[1]))
277 % (address, port, inst.args[1]))
@@ -1,1697 +1,1697 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from i18n import _
9 from i18n import _
10 from node import hex, nullid, short
10 from node import hex, nullid, short
11 import base85, cmdutil, mdiff, util, diffhelpers, copies
11 import base85, cmdutil, mdiff, util, diffhelpers, copies
12 import cStringIO, email.Parser, os, re
12 import cStringIO, email.Parser, os, re
13 import sys, tempfile, zlib
13 import tempfile, zlib
14
14
15 gitre = re.compile('diff --git a/(.*) b/(.*)')
15 gitre = re.compile('diff --git a/(.*) b/(.*)')
16
16
17 class PatchError(Exception):
17 class PatchError(Exception):
18 pass
18 pass
19
19
20 class NoHunks(PatchError):
20 class NoHunks(PatchError):
21 pass
21 pass
22
22
23 # helper functions
23 # helper functions
24
24
25 def copyfile(src, dst, basedir):
25 def copyfile(src, dst, basedir):
26 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
26 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
27 if os.path.exists(absdst):
27 if os.path.exists(absdst):
28 raise util.Abort(_("cannot create %s: destination already exists") %
28 raise util.Abort(_("cannot create %s: destination already exists") %
29 dst)
29 dst)
30
30
31 dstdir = os.path.dirname(absdst)
31 dstdir = os.path.dirname(absdst)
32 if dstdir and not os.path.isdir(dstdir):
32 if dstdir and not os.path.isdir(dstdir):
33 try:
33 try:
34 os.makedirs(dstdir)
34 os.makedirs(dstdir)
35 except IOError:
35 except IOError:
36 raise util.Abort(
36 raise util.Abort(
37 _("cannot create %s: unable to create destination directory")
37 _("cannot create %s: unable to create destination directory")
38 % dst)
38 % dst)
39
39
40 util.copyfile(abssrc, absdst)
40 util.copyfile(abssrc, absdst)
41
41
42 # public functions
42 # public functions
43
43
44 def split(stream):
44 def split(stream):
45 '''return an iterator of individual patches from a stream'''
45 '''return an iterator of individual patches from a stream'''
46 def isheader(line, inheader):
46 def isheader(line, inheader):
47 if inheader and line[0] in (' ', '\t'):
47 if inheader and line[0] in (' ', '\t'):
48 # continuation
48 # continuation
49 return True
49 return True
50 if line[0] in (' ', '-', '+'):
50 if line[0] in (' ', '-', '+'):
51 # diff line - don't check for header pattern in there
51 # diff line - don't check for header pattern in there
52 return False
52 return False
53 l = line.split(': ', 1)
53 l = line.split(': ', 1)
54 return len(l) == 2 and ' ' not in l[0]
54 return len(l) == 2 and ' ' not in l[0]
55
55
56 def chunk(lines):
56 def chunk(lines):
57 return cStringIO.StringIO(''.join(lines))
57 return cStringIO.StringIO(''.join(lines))
58
58
59 def hgsplit(stream, cur):
59 def hgsplit(stream, cur):
60 inheader = True
60 inheader = True
61
61
62 for line in stream:
62 for line in stream:
63 if not line.strip():
63 if not line.strip():
64 inheader = False
64 inheader = False
65 if not inheader and line.startswith('# HG changeset patch'):
65 if not inheader and line.startswith('# HG changeset patch'):
66 yield chunk(cur)
66 yield chunk(cur)
67 cur = []
67 cur = []
68 inheader = True
68 inheader = True
69
69
70 cur.append(line)
70 cur.append(line)
71
71
72 if cur:
72 if cur:
73 yield chunk(cur)
73 yield chunk(cur)
74
74
75 def mboxsplit(stream, cur):
75 def mboxsplit(stream, cur):
76 for line in stream:
76 for line in stream:
77 if line.startswith('From '):
77 if line.startswith('From '):
78 for c in split(chunk(cur[1:])):
78 for c in split(chunk(cur[1:])):
79 yield c
79 yield c
80 cur = []
80 cur = []
81
81
82 cur.append(line)
82 cur.append(line)
83
83
84 if cur:
84 if cur:
85 for c in split(chunk(cur[1:])):
85 for c in split(chunk(cur[1:])):
86 yield c
86 yield c
87
87
88 def mimesplit(stream, cur):
88 def mimesplit(stream, cur):
89 def msgfp(m):
89 def msgfp(m):
90 fp = cStringIO.StringIO()
90 fp = cStringIO.StringIO()
91 g = email.Generator.Generator(fp, mangle_from_=False)
91 g = email.Generator.Generator(fp, mangle_from_=False)
92 g.flatten(m)
92 g.flatten(m)
93 fp.seek(0)
93 fp.seek(0)
94 return fp
94 return fp
95
95
96 for line in stream:
96 for line in stream:
97 cur.append(line)
97 cur.append(line)
98 c = chunk(cur)
98 c = chunk(cur)
99
99
100 m = email.Parser.Parser().parse(c)
100 m = email.Parser.Parser().parse(c)
101 if not m.is_multipart():
101 if not m.is_multipart():
102 yield msgfp(m)
102 yield msgfp(m)
103 else:
103 else:
104 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
104 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
105 for part in m.walk():
105 for part in m.walk():
106 ct = part.get_content_type()
106 ct = part.get_content_type()
107 if ct not in ok_types:
107 if ct not in ok_types:
108 continue
108 continue
109 yield msgfp(part)
109 yield msgfp(part)
110
110
111 def headersplit(stream, cur):
111 def headersplit(stream, cur):
112 inheader = False
112 inheader = False
113
113
114 for line in stream:
114 for line in stream:
115 if not inheader and isheader(line, inheader):
115 if not inheader and isheader(line, inheader):
116 yield chunk(cur)
116 yield chunk(cur)
117 cur = []
117 cur = []
118 inheader = True
118 inheader = True
119 if inheader and not isheader(line, inheader):
119 if inheader and not isheader(line, inheader):
120 inheader = False
120 inheader = False
121
121
122 cur.append(line)
122 cur.append(line)
123
123
124 if cur:
124 if cur:
125 yield chunk(cur)
125 yield chunk(cur)
126
126
127 def remainder(cur):
127 def remainder(cur):
128 yield chunk(cur)
128 yield chunk(cur)
129
129
130 class fiter(object):
130 class fiter(object):
131 def __init__(self, fp):
131 def __init__(self, fp):
132 self.fp = fp
132 self.fp = fp
133
133
134 def __iter__(self):
134 def __iter__(self):
135 return self
135 return self
136
136
137 def next(self):
137 def next(self):
138 l = self.fp.readline()
138 l = self.fp.readline()
139 if not l:
139 if not l:
140 raise StopIteration
140 raise StopIteration
141 return l
141 return l
142
142
143 inheader = False
143 inheader = False
144 cur = []
144 cur = []
145
145
146 mimeheaders = ['content-type']
146 mimeheaders = ['content-type']
147
147
148 if not hasattr(stream, 'next'):
148 if not hasattr(stream, 'next'):
149 # http responses, for example, have readline but not next
149 # http responses, for example, have readline but not next
150 stream = fiter(stream)
150 stream = fiter(stream)
151
151
152 for line in stream:
152 for line in stream:
153 cur.append(line)
153 cur.append(line)
154 if line.startswith('# HG changeset patch'):
154 if line.startswith('# HG changeset patch'):
155 return hgsplit(stream, cur)
155 return hgsplit(stream, cur)
156 elif line.startswith('From '):
156 elif line.startswith('From '):
157 return mboxsplit(stream, cur)
157 return mboxsplit(stream, cur)
158 elif isheader(line, inheader):
158 elif isheader(line, inheader):
159 inheader = True
159 inheader = True
160 if line.split(':', 1)[0].lower() in mimeheaders:
160 if line.split(':', 1)[0].lower() in mimeheaders:
161 # let email parser handle this
161 # let email parser handle this
162 return mimesplit(stream, cur)
162 return mimesplit(stream, cur)
163 elif line.startswith('--- ') and inheader:
163 elif line.startswith('--- ') and inheader:
164 # No evil headers seen by diff start, split by hand
164 # No evil headers seen by diff start, split by hand
165 return headersplit(stream, cur)
165 return headersplit(stream, cur)
166 # Not enough info, keep reading
166 # Not enough info, keep reading
167
167
168 # if we are here, we have a very plain patch
168 # if we are here, we have a very plain patch
169 return remainder(cur)
169 return remainder(cur)
170
170
171 def extract(ui, fileobj):
171 def extract(ui, fileobj):
172 '''extract patch from data read from fileobj.
172 '''extract patch from data read from fileobj.
173
173
174 patch can be a normal patch or contained in an email message.
174 patch can be a normal patch or contained in an email message.
175
175
176 return tuple (filename, message, user, date, node, p1, p2).
176 return tuple (filename, message, user, date, node, p1, p2).
177 Any item in the returned tuple can be None. If filename is None,
177 Any item in the returned tuple can be None. If filename is None,
178 fileobj did not contain a patch. Caller must unlink filename when done.'''
178 fileobj did not contain a patch. Caller must unlink filename when done.'''
179
179
180 # attempt to detect the start of a patch
180 # attempt to detect the start of a patch
181 # (this heuristic is borrowed from quilt)
181 # (this heuristic is borrowed from quilt)
182 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
182 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
183 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
183 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
184 r'---[ \t].*?^\+\+\+[ \t]|'
184 r'---[ \t].*?^\+\+\+[ \t]|'
185 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
185 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
186
186
187 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
187 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
188 tmpfp = os.fdopen(fd, 'w')
188 tmpfp = os.fdopen(fd, 'w')
189 try:
189 try:
190 msg = email.Parser.Parser().parse(fileobj)
190 msg = email.Parser.Parser().parse(fileobj)
191
191
192 subject = msg['Subject']
192 subject = msg['Subject']
193 user = msg['From']
193 user = msg['From']
194 if not subject and not user:
194 if not subject and not user:
195 # Not an email, restore parsed headers if any
195 # Not an email, restore parsed headers if any
196 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
196 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
197
197
198 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
198 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
199 # should try to parse msg['Date']
199 # should try to parse msg['Date']
200 date = None
200 date = None
201 nodeid = None
201 nodeid = None
202 branch = None
202 branch = None
203 parents = []
203 parents = []
204
204
205 if subject:
205 if subject:
206 if subject.startswith('[PATCH'):
206 if subject.startswith('[PATCH'):
207 pend = subject.find(']')
207 pend = subject.find(']')
208 if pend >= 0:
208 if pend >= 0:
209 subject = subject[pend + 1:].lstrip()
209 subject = subject[pend + 1:].lstrip()
210 subject = subject.replace('\n\t', ' ')
210 subject = subject.replace('\n\t', ' ')
211 ui.debug('Subject: %s\n' % subject)
211 ui.debug('Subject: %s\n' % subject)
212 if user:
212 if user:
213 ui.debug('From: %s\n' % user)
213 ui.debug('From: %s\n' % user)
214 diffs_seen = 0
214 diffs_seen = 0
215 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
215 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
216 message = ''
216 message = ''
217 for part in msg.walk():
217 for part in msg.walk():
218 content_type = part.get_content_type()
218 content_type = part.get_content_type()
219 ui.debug('Content-Type: %s\n' % content_type)
219 ui.debug('Content-Type: %s\n' % content_type)
220 if content_type not in ok_types:
220 if content_type not in ok_types:
221 continue
221 continue
222 payload = part.get_payload(decode=True)
222 payload = part.get_payload(decode=True)
223 m = diffre.search(payload)
223 m = diffre.search(payload)
224 if m:
224 if m:
225 hgpatch = False
225 hgpatch = False
226 ignoretext = False
226 ignoretext = False
227
227
228 ui.debug('found patch at byte %d\n' % m.start(0))
228 ui.debug('found patch at byte %d\n' % m.start(0))
229 diffs_seen += 1
229 diffs_seen += 1
230 cfp = cStringIO.StringIO()
230 cfp = cStringIO.StringIO()
231 for line in payload[:m.start(0)].splitlines():
231 for line in payload[:m.start(0)].splitlines():
232 if line.startswith('# HG changeset patch'):
232 if line.startswith('# HG changeset patch'):
233 ui.debug('patch generated by hg export\n')
233 ui.debug('patch generated by hg export\n')
234 hgpatch = True
234 hgpatch = True
235 # drop earlier commit message content
235 # drop earlier commit message content
236 cfp.seek(0)
236 cfp.seek(0)
237 cfp.truncate()
237 cfp.truncate()
238 subject = None
238 subject = None
239 elif hgpatch:
239 elif hgpatch:
240 if line.startswith('# User '):
240 if line.startswith('# User '):
241 user = line[7:]
241 user = line[7:]
242 ui.debug('From: %s\n' % user)
242 ui.debug('From: %s\n' % user)
243 elif line.startswith("# Date "):
243 elif line.startswith("# Date "):
244 date = line[7:]
244 date = line[7:]
245 elif line.startswith("# Branch "):
245 elif line.startswith("# Branch "):
246 branch = line[9:]
246 branch = line[9:]
247 elif line.startswith("# Node ID "):
247 elif line.startswith("# Node ID "):
248 nodeid = line[10:]
248 nodeid = line[10:]
249 elif line.startswith("# Parent "):
249 elif line.startswith("# Parent "):
250 parents.append(line[10:])
250 parents.append(line[10:])
251 elif line == '---' and gitsendmail:
251 elif line == '---' and gitsendmail:
252 ignoretext = True
252 ignoretext = True
253 if not line.startswith('# ') and not ignoretext:
253 if not line.startswith('# ') and not ignoretext:
254 cfp.write(line)
254 cfp.write(line)
255 cfp.write('\n')
255 cfp.write('\n')
256 message = cfp.getvalue()
256 message = cfp.getvalue()
257 if tmpfp:
257 if tmpfp:
258 tmpfp.write(payload)
258 tmpfp.write(payload)
259 if not payload.endswith('\n'):
259 if not payload.endswith('\n'):
260 tmpfp.write('\n')
260 tmpfp.write('\n')
261 elif not diffs_seen and message and content_type == 'text/plain':
261 elif not diffs_seen and message and content_type == 'text/plain':
262 message += '\n' + payload
262 message += '\n' + payload
263 except:
263 except:
264 tmpfp.close()
264 tmpfp.close()
265 os.unlink(tmpname)
265 os.unlink(tmpname)
266 raise
266 raise
267
267
268 if subject and not message.startswith(subject):
268 if subject and not message.startswith(subject):
269 message = '%s\n%s' % (subject, message)
269 message = '%s\n%s' % (subject, message)
270 tmpfp.close()
270 tmpfp.close()
271 if not diffs_seen:
271 if not diffs_seen:
272 os.unlink(tmpname)
272 os.unlink(tmpname)
273 return None, message, user, date, branch, None, None, None
273 return None, message, user, date, branch, None, None, None
274 p1 = parents and parents.pop(0) or None
274 p1 = parents and parents.pop(0) or None
275 p2 = parents and parents.pop(0) or None
275 p2 = parents and parents.pop(0) or None
276 return tmpname, message, user, date, branch, nodeid, p1, p2
276 return tmpname, message, user, date, branch, nodeid, p1, p2
277
277
278 GP_PATCH = 1 << 0 # we have to run patch
278 GP_PATCH = 1 << 0 # we have to run patch
279 GP_FILTER = 1 << 1 # there's some copy/rename operation
279 GP_FILTER = 1 << 1 # there's some copy/rename operation
280 GP_BINARY = 1 << 2 # there's a binary patch
280 GP_BINARY = 1 << 2 # there's a binary patch
281
281
282 class patchmeta(object):
282 class patchmeta(object):
283 """Patched file metadata
283 """Patched file metadata
284
284
285 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
285 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
286 or COPY. 'path' is patched file path. 'oldpath' is set to the
286 or COPY. 'path' is patched file path. 'oldpath' is set to the
287 origin file when 'op' is either COPY or RENAME, None otherwise. If
287 origin file when 'op' is either COPY or RENAME, None otherwise. If
288 file mode is changed, 'mode' is a tuple (islink, isexec) where
288 file mode is changed, 'mode' is a tuple (islink, isexec) where
289 'islink' is True if the file is a symlink and 'isexec' is True if
289 'islink' is True if the file is a symlink and 'isexec' is True if
290 the file is executable. Otherwise, 'mode' is None.
290 the file is executable. Otherwise, 'mode' is None.
291 """
291 """
292 def __init__(self, path):
292 def __init__(self, path):
293 self.path = path
293 self.path = path
294 self.oldpath = None
294 self.oldpath = None
295 self.mode = None
295 self.mode = None
296 self.op = 'MODIFY'
296 self.op = 'MODIFY'
297 self.lineno = 0
297 self.lineno = 0
298 self.binary = False
298 self.binary = False
299
299
300 def setmode(self, mode):
300 def setmode(self, mode):
301 islink = mode & 020000
301 islink = mode & 020000
302 isexec = mode & 0100
302 isexec = mode & 0100
303 self.mode = (islink, isexec)
303 self.mode = (islink, isexec)
304
304
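setmode() keeps only the two mode bits Mercurial tracks from the octal file mode found in git extended headers. A small sketch, using the same octal handling as readgitpatch() below (the path is illustrative):

gp = patchmeta('bin/run.sh')
gp.setmode(int('100755', 8))      # as readgitpatch() does: int(line[-6:], 8)
islink, isexec = gp.mode
print bool(islink), bool(isexec)  # False True

gp.setmode(int('120000', 8))      # a symlink entry
print bool(gp.mode[0])            # True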
305 def readgitpatch(lr):
305 def readgitpatch(lr):
306 """extract git-style metadata about patches from <patchname>"""
306 """extract git-style metadata about patches from <patchname>"""
307
307
308 # Filter patch for git information
308 # Filter patch for git information
309 gp = None
309 gp = None
310 gitpatches = []
310 gitpatches = []
311 # Can have a git patch with only metadata, causing patch to complain
311 # Can have a git patch with only metadata, causing patch to complain
312 dopatch = 0
312 dopatch = 0
313
313
314 lineno = 0
314 lineno = 0
315 for line in lr:
315 for line in lr:
316 lineno += 1
316 lineno += 1
317 line = line.rstrip(' \r\n')
317 line = line.rstrip(' \r\n')
318 if line.startswith('diff --git'):
318 if line.startswith('diff --git'):
319 m = gitre.match(line)
319 m = gitre.match(line)
320 if m:
320 if m:
321 if gp:
321 if gp:
322 gitpatches.append(gp)
322 gitpatches.append(gp)
323 dst = m.group(2)
323 dst = m.group(2)
324 gp = patchmeta(dst)
324 gp = patchmeta(dst)
325 gp.lineno = lineno
325 gp.lineno = lineno
326 elif gp:
326 elif gp:
327 if line.startswith('--- '):
327 if line.startswith('--- '):
328 if gp.op in ('COPY', 'RENAME'):
328 if gp.op in ('COPY', 'RENAME'):
329 dopatch |= GP_FILTER
329 dopatch |= GP_FILTER
330 gitpatches.append(gp)
330 gitpatches.append(gp)
331 gp = None
331 gp = None
332 dopatch |= GP_PATCH
332 dopatch |= GP_PATCH
333 continue
333 continue
334 if line.startswith('rename from '):
334 if line.startswith('rename from '):
335 gp.op = 'RENAME'
335 gp.op = 'RENAME'
336 gp.oldpath = line[12:]
336 gp.oldpath = line[12:]
337 elif line.startswith('rename to '):
337 elif line.startswith('rename to '):
338 gp.path = line[10:]
338 gp.path = line[10:]
339 elif line.startswith('copy from '):
339 elif line.startswith('copy from '):
340 gp.op = 'COPY'
340 gp.op = 'COPY'
341 gp.oldpath = line[10:]
341 gp.oldpath = line[10:]
342 elif line.startswith('copy to '):
342 elif line.startswith('copy to '):
343 gp.path = line[8:]
343 gp.path = line[8:]
344 elif line.startswith('deleted file'):
344 elif line.startswith('deleted file'):
345 gp.op = 'DELETE'
345 gp.op = 'DELETE'
346 # is the deleted file a symlink?
346 # is the deleted file a symlink?
347 gp.setmode(int(line[-6:], 8))
347 gp.setmode(int(line[-6:], 8))
348 elif line.startswith('new file mode '):
348 elif line.startswith('new file mode '):
349 gp.op = 'ADD'
349 gp.op = 'ADD'
350 gp.setmode(int(line[-6:], 8))
350 gp.setmode(int(line[-6:], 8))
351 elif line.startswith('new mode '):
351 elif line.startswith('new mode '):
352 gp.setmode(int(line[-6:], 8))
352 gp.setmode(int(line[-6:], 8))
353 elif line.startswith('GIT binary patch'):
353 elif line.startswith('GIT binary patch'):
354 dopatch |= GP_BINARY
354 dopatch |= GP_BINARY
355 gp.binary = True
355 gp.binary = True
356 if gp:
356 if gp:
357 gitpatches.append(gp)
357 gitpatches.append(gp)
358
358
359 if not gitpatches:
359 if not gitpatches:
360 dopatch = GP_PATCH
360 dopatch = GP_PATCH
361
361
362 return (dopatch, gitpatches)
362 return (dopatch, gitpatches)
363
363
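Because readgitpatch() only iterates over 'lr', any sequence of lines can be fed to it for inspection. A hedged sketch of the metadata extracted from a rename-plus-edit header (the 'diff --git' line is parsed with the module-level 'gitre' regexp defined earlier in this file):

lines = [
    'diff --git a/old.txt b/new.txt\n',
    'rename from old.txt\n',
    'rename to new.txt\n',
    '--- a/old.txt\n',              # start of the hunk proper
]
dopatch, gps = readgitpatch(lines)
print gps[0].op, gps[0].oldpath, gps[0].path   # RENAME old.txt new.txt
print bool(dopatch & GP_FILTER)                # True: a rename was seen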
364 class linereader(object):
364 class linereader(object):
365 # simple class to allow pushing lines back into the input stream
365 # simple class to allow pushing lines back into the input stream
366 def __init__(self, fp, textmode=False):
366 def __init__(self, fp, textmode=False):
367 self.fp = fp
367 self.fp = fp
368 self.buf = []
368 self.buf = []
369 self.textmode = textmode
369 self.textmode = textmode
370 self.eol = None
370 self.eol = None
371
371
372 def push(self, line):
372 def push(self, line):
373 if line is not None:
373 if line is not None:
374 self.buf.append(line)
374 self.buf.append(line)
375
375
376 def readline(self):
376 def readline(self):
377 if self.buf:
377 if self.buf:
378 l = self.buf[0]
378 l = self.buf[0]
379 del self.buf[0]
379 del self.buf[0]
380 return l
380 return l
381 l = self.fp.readline()
381 l = self.fp.readline()
382 if not self.eol:
382 if not self.eol:
383 if l.endswith('\r\n'):
383 if l.endswith('\r\n'):
384 self.eol = '\r\n'
384 self.eol = '\r\n'
385 elif l.endswith('\n'):
385 elif l.endswith('\n'):
386 self.eol = '\n'
386 self.eol = '\n'
387 if self.textmode and l.endswith('\r\n'):
387 if self.textmode and l.endswith('\r\n'):
388 l = l[:-2] + '\n'
388 l = l[:-2] + '\n'
389 return l
389 return l
390
390
391 def __iter__(self):
391 def __iter__(self):
392 while 1:
392 while 1:
393 l = self.readline()
393 l = self.readline()
394 if not l:
394 if not l:
395 break
395 break
396 yield l
396 yield l
397
397
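linereader exists so the parsers below can peek ahead: read a line, decide it belongs to someone else, and push it back. A small sketch (cStringIO is assumed to be imported at the top of this module, as it is used elsewhere in the file):

lr = linereader(cStringIO.StringIO('--- a/x\n+++ b/x\n'))
first = lr.readline()            # '--- a/x\n'
lr.push(first)                   # not ours after all, put it back
print lr.readline() == first     # True: pushed lines are returned first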
398 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
398 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
399 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
399 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
400 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
400 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
401 eolmodes = ['strict', 'crlf', 'lf', 'auto']
401 eolmodes = ['strict', 'crlf', 'lf', 'auto']
402
402
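unidesc and contextdesc recognize the hunk headers of unified and context diffs; read_unified_hunk() below unpacks the start/length pairs from the match groups. A quick look at what those groups contain:

m = unidesc.match('@@ -1,5 +1,7 @@')
starta, junk1, lena, startb, junk2, lenb = m.groups()
print starta, lena, startb, lenb     # 1 5 1 7

m = unidesc.match('@@ -10 +10 @@')   # omitted lengths default to 1
print m.group(3), m.group(6)         # None None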
403 class patchfile(object):
403 class patchfile(object):
404 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
404 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
405 self.fname = fname
405 self.fname = fname
406 self.eolmode = eolmode
406 self.eolmode = eolmode
407 self.eol = None
407 self.eol = None
408 self.opener = opener
408 self.opener = opener
409 self.ui = ui
409 self.ui = ui
410 self.lines = []
410 self.lines = []
411 self.exists = False
411 self.exists = False
412 self.missing = missing
412 self.missing = missing
413 if not missing:
413 if not missing:
414 try:
414 try:
415 self.lines = self.readlines(fname)
415 self.lines = self.readlines(fname)
416 self.exists = True
416 self.exists = True
417 except IOError:
417 except IOError:
418 pass
418 pass
419 else:
419 else:
420 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
420 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
421
421
422 self.hash = {}
422 self.hash = {}
423 self.dirty = 0
423 self.dirty = 0
424 self.offset = 0
424 self.offset = 0
425 self.skew = 0
425 self.skew = 0
426 self.rej = []
426 self.rej = []
427 self.fileprinted = False
427 self.fileprinted = False
428 self.printfile(False)
428 self.printfile(False)
429 self.hunks = 0
429 self.hunks = 0
430
430
431 def readlines(self, fname):
431 def readlines(self, fname):
432 if os.path.islink(fname):
432 if os.path.islink(fname):
433 return [os.readlink(fname)]
433 return [os.readlink(fname)]
434 fp = self.opener(fname, 'r')
434 fp = self.opener(fname, 'r')
435 try:
435 try:
436 lr = linereader(fp, self.eolmode != 'strict')
436 lr = linereader(fp, self.eolmode != 'strict')
437 lines = list(lr)
437 lines = list(lr)
438 self.eol = lr.eol
438 self.eol = lr.eol
439 return lines
439 return lines
440 finally:
440 finally:
441 fp.close()
441 fp.close()
442
442
443 def writelines(self, fname, lines):
443 def writelines(self, fname, lines):
444 # Ensure the supplied data ends up in fname, whether that is a regular
444 # Ensure the supplied data ends up in fname, whether that is a regular
445 # file or a symlink. updatedir() will -too magically- take care of
445 # file or a symlink. updatedir() will -too magically- take care of
446 # setting it to the proper type afterwards.
446 # setting it to the proper type afterwards.
447 islink = os.path.islink(fname)
447 islink = os.path.islink(fname)
448 if islink:
448 if islink:
449 fp = cStringIO.StringIO()
449 fp = cStringIO.StringIO()
450 else:
450 else:
451 fp = self.opener(fname, 'w')
451 fp = self.opener(fname, 'w')
452 try:
452 try:
453 if self.eolmode == 'auto':
453 if self.eolmode == 'auto':
454 eol = self.eol
454 eol = self.eol
455 elif self.eolmode == 'crlf':
455 elif self.eolmode == 'crlf':
456 eol = '\r\n'
456 eol = '\r\n'
457 else:
457 else:
458 eol = '\n'
458 eol = '\n'
459
459
460 if self.eolmode != 'strict' and eol and eol != '\n':
460 if self.eolmode != 'strict' and eol and eol != '\n':
461 for l in lines:
461 for l in lines:
462 if l and l[-1] == '\n':
462 if l and l[-1] == '\n':
463 l = l[:-1] + eol
463 l = l[:-1] + eol
464 fp.write(l)
464 fp.write(l)
465 else:
465 else:
466 fp.writelines(lines)
466 fp.writelines(lines)
467 if islink:
467 if islink:
468 self.opener.symlink(fp.getvalue(), fname)
468 self.opener.symlink(fp.getvalue(), fname)
469 finally:
469 finally:
470 fp.close()
470 fp.close()
471
471
472 def unlink(self, fname):
472 def unlink(self, fname):
473 os.unlink(fname)
473 os.unlink(fname)
474
474
475 def printfile(self, warn):
475 def printfile(self, warn):
476 if self.fileprinted:
476 if self.fileprinted:
477 return
477 return
478 if warn or self.ui.verbose:
478 if warn or self.ui.verbose:
479 self.fileprinted = True
479 self.fileprinted = True
480 s = _("patching file %s\n") % self.fname
480 s = _("patching file %s\n") % self.fname
481 if warn:
481 if warn:
482 self.ui.warn(s)
482 self.ui.warn(s)
483 else:
483 else:
484 self.ui.note(s)
484 self.ui.note(s)
485
485
486
486
487 def findlines(self, l, linenum):
487 def findlines(self, l, linenum):
488 # looks through the hash and finds candidate lines. The
488 # looks through the hash and finds candidate lines. The
489 # result is a list of line numbers sorted based on distance
489 # result is a list of line numbers sorted based on distance
490 # from linenum
490 # from linenum
491
491
492 cand = self.hash.get(l, [])
492 cand = self.hash.get(l, [])
493 if len(cand) > 1:
493 if len(cand) > 1:
494 # sort the candidate line numbers by distance from linenum
494 # sort the candidate line numbers by distance from linenum
495 cand.sort(key=lambda x: abs(x - linenum))
495 cand.sort(key=lambda x: abs(x - linenum))
496 return cand
496 return cand
497
497
498 def hashlines(self):
498 def hashlines(self):
499 self.hash = {}
499 self.hash = {}
500 for x, s in enumerate(self.lines):
500 for x, s in enumerate(self.lines):
501 self.hash.setdefault(s, []).append(x)
501 self.hash.setdefault(s, []).append(x)
502
502
503 def write_rej(self):
503 def write_rej(self):
504 # our rejects are a little different from patch(1). This always
504 # our rejects are a little different from patch(1). This always
505 # creates rejects in the same form as the original patch. A file
505 # creates rejects in the same form as the original patch. A file
506 # header is inserted so that you can run the reject through patch again
506 # header is inserted so that you can run the reject through patch again
507 # without having to type the filename.
507 # without having to type the filename.
508
508
509 if not self.rej:
509 if not self.rej:
510 return
510 return
511
511
512 fname = self.fname + ".rej"
512 fname = self.fname + ".rej"
513 self.ui.warn(
513 self.ui.warn(
514 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
514 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
515 (len(self.rej), self.hunks, fname))
515 (len(self.rej), self.hunks, fname))
516
516
517 def rejlines():
517 def rejlines():
518 base = os.path.basename(self.fname)
518 base = os.path.basename(self.fname)
519 yield "--- %s\n+++ %s\n" % (base, base)
519 yield "--- %s\n+++ %s\n" % (base, base)
520 for x in self.rej:
520 for x in self.rej:
521 for l in x.hunk:
521 for l in x.hunk:
522 yield l
522 yield l
523 if l[-1] != '\n':
523 if l[-1] != '\n':
524 yield "\n\ No newline at end of file\n"
524 yield "\n\ No newline at end of file\n"
525
525
526 self.writelines(fname, rejlines())
526 self.writelines(fname, rejlines())
527
527
528 def write(self, dest=None):
528 def write(self, dest=None):
529 if not self.dirty:
529 if not self.dirty:
530 return
530 return
531 if not dest:
531 if not dest:
532 dest = self.fname
532 dest = self.fname
533 self.writelines(dest, self.lines)
533 self.writelines(dest, self.lines)
534
534
535 def close(self):
535 def close(self):
536 self.write()
536 self.write()
537 self.write_rej()
537 self.write_rej()
538
538
539 def apply(self, h):
539 def apply(self, h):
540 if not h.complete():
540 if not h.complete():
541 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
541 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
542 (h.number, h.desc, len(h.a), h.lena, len(h.b),
542 (h.number, h.desc, len(h.a), h.lena, len(h.b),
543 h.lenb))
543 h.lenb))
544
544
545 self.hunks += 1
545 self.hunks += 1
546
546
547 if self.missing:
547 if self.missing:
548 self.rej.append(h)
548 self.rej.append(h)
549 return -1
549 return -1
550
550
551 if self.exists and h.createfile():
551 if self.exists and h.createfile():
552 self.ui.warn(_("file %s already exists\n") % self.fname)
552 self.ui.warn(_("file %s already exists\n") % self.fname)
553 self.rej.append(h)
553 self.rej.append(h)
554 return -1
554 return -1
555
555
556 if isinstance(h, binhunk):
556 if isinstance(h, binhunk):
557 if h.rmfile():
557 if h.rmfile():
558 self.unlink(self.fname)
558 self.unlink(self.fname)
559 else:
559 else:
560 self.lines[:] = h.new()
560 self.lines[:] = h.new()
561 self.offset += len(h.new())
561 self.offset += len(h.new())
562 self.dirty = 1
562 self.dirty = 1
563 return 0
563 return 0
564
564
565 horig = h
565 horig = h
566 if (self.eolmode in ('crlf', 'lf')
566 if (self.eolmode in ('crlf', 'lf')
567 or self.eolmode == 'auto' and self.eol):
567 or self.eolmode == 'auto' and self.eol):
568 # If new eols are going to be normalized, then normalize
568 # If new eols are going to be normalized, then normalize
569 # hunk data before patching. Otherwise, preserve input
569 # hunk data before patching. Otherwise, preserve input
570 # line-endings.
570 # line-endings.
571 h = h.getnormalized()
571 h = h.getnormalized()
572
572
573 # fast case first, no offsets, no fuzz
573 # fast case first, no offsets, no fuzz
574 old = h.old()
574 old = h.old()
575 # patch starts counting at 1 unless we are adding the file
575 # patch starts counting at 1 unless we are adding the file
576 if h.starta == 0:
576 if h.starta == 0:
577 start = 0
577 start = 0
578 else:
578 else:
579 start = h.starta + self.offset - 1
579 start = h.starta + self.offset - 1
580 orig_start = start
580 orig_start = start
581 # if there's skew we want to emit the "(offset %d lines)" even
581 # if there's skew we want to emit the "(offset %d lines)" even
582 # when the hunk cleanly applies at start + skew, so skip the
582 # when the hunk cleanly applies at start + skew, so skip the
583 # fast case code
583 # fast case code
584 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
584 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
585 if h.rmfile():
585 if h.rmfile():
586 self.unlink(self.fname)
586 self.unlink(self.fname)
587 else:
587 else:
588 self.lines[start : start + h.lena] = h.new()
588 self.lines[start : start + h.lena] = h.new()
589 self.offset += h.lenb - h.lena
589 self.offset += h.lenb - h.lena
590 self.dirty = 1
590 self.dirty = 1
591 return 0
591 return 0
592
592
593 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
593 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
594 self.hashlines()
594 self.hashlines()
595 if h.hunk[-1][0] != ' ':
595 if h.hunk[-1][0] != ' ':
596 # if the hunk tried to put something at the bottom of the file
596 # if the hunk tried to put something at the bottom of the file
597 # override the start line and use eof here
597 # override the start line and use eof here
598 search_start = len(self.lines)
598 search_start = len(self.lines)
599 else:
599 else:
600 search_start = orig_start + self.skew
600 search_start = orig_start + self.skew
601
601
602 for fuzzlen in xrange(3):
602 for fuzzlen in xrange(3):
603 for toponly in [True, False]:
603 for toponly in [True, False]:
604 old = h.old(fuzzlen, toponly)
604 old = h.old(fuzzlen, toponly)
605
605
606 cand = self.findlines(old[0][1:], search_start)
606 cand = self.findlines(old[0][1:], search_start)
607 for l in cand:
607 for l in cand:
608 if diffhelpers.testhunk(old, self.lines, l) == 0:
608 if diffhelpers.testhunk(old, self.lines, l) == 0:
609 newlines = h.new(fuzzlen, toponly)
609 newlines = h.new(fuzzlen, toponly)
610 self.lines[l : l + len(old)] = newlines
610 self.lines[l : l + len(old)] = newlines
611 self.offset += len(newlines) - len(old)
611 self.offset += len(newlines) - len(old)
612 self.skew = l - orig_start
612 self.skew = l - orig_start
613 self.dirty = 1
613 self.dirty = 1
614 offset = l - orig_start - fuzzlen
614 offset = l - orig_start - fuzzlen
615 if fuzzlen:
615 if fuzzlen:
616 msg = _("Hunk #%d succeeded at %d "
616 msg = _("Hunk #%d succeeded at %d "
617 "with fuzz %d "
617 "with fuzz %d "
618 "(offset %d lines).\n")
618 "(offset %d lines).\n")
619 self.printfile(True)
619 self.printfile(True)
620 self.ui.warn(msg %
620 self.ui.warn(msg %
621 (h.number, l + 1, fuzzlen, offset))
621 (h.number, l + 1, fuzzlen, offset))
622 else:
622 else:
623 msg = _("Hunk #%d succeeded at %d "
623 msg = _("Hunk #%d succeeded at %d "
624 "(offset %d lines).\n")
624 "(offset %d lines).\n")
625 self.ui.note(msg % (h.number, l + 1, offset))
625 self.ui.note(msg % (h.number, l + 1, offset))
626 return fuzzlen
626 return fuzzlen
627 self.printfile(True)
627 self.printfile(True)
628 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
628 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
629 self.rej.append(horig)
629 self.rej.append(horig)
630 return -1
630 return -1
631
631
632 class hunk(object):
632 class hunk(object):
633 def __init__(self, desc, num, lr, context, create=False, remove=False):
633 def __init__(self, desc, num, lr, context, create=False, remove=False):
634 self.number = num
634 self.number = num
635 self.desc = desc
635 self.desc = desc
636 self.hunk = [desc]
636 self.hunk = [desc]
637 self.a = []
637 self.a = []
638 self.b = []
638 self.b = []
639 self.starta = self.lena = None
639 self.starta = self.lena = None
640 self.startb = self.lenb = None
640 self.startb = self.lenb = None
641 if lr is not None:
641 if lr is not None:
642 if context:
642 if context:
643 self.read_context_hunk(lr)
643 self.read_context_hunk(lr)
644 else:
644 else:
645 self.read_unified_hunk(lr)
645 self.read_unified_hunk(lr)
646 self.create = create
646 self.create = create
647 self.remove = remove and not create
647 self.remove = remove and not create
648
648
649 def getnormalized(self):
649 def getnormalized(self):
650 """Return a copy with line endings normalized to LF."""
650 """Return a copy with line endings normalized to LF."""
651
651
652 def normalize(lines):
652 def normalize(lines):
653 nlines = []
653 nlines = []
654 for line in lines:
654 for line in lines:
655 if line.endswith('\r\n'):
655 if line.endswith('\r\n'):
656 line = line[:-2] + '\n'
656 line = line[:-2] + '\n'
657 nlines.append(line)
657 nlines.append(line)
658 return nlines
658 return nlines
659
659
660 # Dummy object, it is rebuilt manually
660 # Dummy object, it is rebuilt manually
661 nh = hunk(self.desc, self.number, None, None, False, False)
661 nh = hunk(self.desc, self.number, None, None, False, False)
662 nh.number = self.number
662 nh.number = self.number
663 nh.desc = self.desc
663 nh.desc = self.desc
664 nh.hunk = self.hunk
664 nh.hunk = self.hunk
665 nh.a = normalize(self.a)
665 nh.a = normalize(self.a)
666 nh.b = normalize(self.b)
666 nh.b = normalize(self.b)
667 nh.starta = self.starta
667 nh.starta = self.starta
668 nh.startb = self.startb
668 nh.startb = self.startb
669 nh.lena = self.lena
669 nh.lena = self.lena
670 nh.lenb = self.lenb
670 nh.lenb = self.lenb
671 nh.create = self.create
671 nh.create = self.create
672 nh.remove = self.remove
672 nh.remove = self.remove
673 return nh
673 return nh
674
674
675 def read_unified_hunk(self, lr):
675 def read_unified_hunk(self, lr):
676 m = unidesc.match(self.desc)
676 m = unidesc.match(self.desc)
677 if not m:
677 if not m:
678 raise PatchError(_("bad hunk #%d") % self.number)
678 raise PatchError(_("bad hunk #%d") % self.number)
679 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
679 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
680 if self.lena is None:
680 if self.lena is None:
681 self.lena = 1
681 self.lena = 1
682 else:
682 else:
683 self.lena = int(self.lena)
683 self.lena = int(self.lena)
684 if self.lenb is None:
684 if self.lenb is None:
685 self.lenb = 1
685 self.lenb = 1
686 else:
686 else:
687 self.lenb = int(self.lenb)
687 self.lenb = int(self.lenb)
688 self.starta = int(self.starta)
688 self.starta = int(self.starta)
689 self.startb = int(self.startb)
689 self.startb = int(self.startb)
690 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
690 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
691 # if we hit eof before finishing out the hunk, the last line will
691 # if we hit eof before finishing out the hunk, the last line will
692 # be zero length. Let's try to fix it up.
692 # be zero length. Let's try to fix it up.
693 while len(self.hunk[-1]) == 0:
693 while len(self.hunk[-1]) == 0:
694 del self.hunk[-1]
694 del self.hunk[-1]
695 del self.a[-1]
695 del self.a[-1]
696 del self.b[-1]
696 del self.b[-1]
697 self.lena -= 1
697 self.lena -= 1
698 self.lenb -= 1
698 self.lenb -= 1
699
699
700 def read_context_hunk(self, lr):
700 def read_context_hunk(self, lr):
701 self.desc = lr.readline()
701 self.desc = lr.readline()
702 m = contextdesc.match(self.desc)
702 m = contextdesc.match(self.desc)
703 if not m:
703 if not m:
704 raise PatchError(_("bad hunk #%d") % self.number)
704 raise PatchError(_("bad hunk #%d") % self.number)
705 foo, self.starta, foo2, aend, foo3 = m.groups()
705 foo, self.starta, foo2, aend, foo3 = m.groups()
706 self.starta = int(self.starta)
706 self.starta = int(self.starta)
707 if aend is None:
707 if aend is None:
708 aend = self.starta
708 aend = self.starta
709 self.lena = int(aend) - self.starta
709 self.lena = int(aend) - self.starta
710 if self.starta:
710 if self.starta:
711 self.lena += 1
711 self.lena += 1
712 for x in xrange(self.lena):
712 for x in xrange(self.lena):
713 l = lr.readline()
713 l = lr.readline()
714 if l.startswith('---'):
714 if l.startswith('---'):
715 lr.push(l)
715 lr.push(l)
716 break
716 break
717 s = l[2:]
717 s = l[2:]
718 if l.startswith('- ') or l.startswith('! '):
718 if l.startswith('- ') or l.startswith('! '):
719 u = '-' + s
719 u = '-' + s
720 elif l.startswith(' '):
720 elif l.startswith(' '):
721 u = ' ' + s
721 u = ' ' + s
722 else:
722 else:
723 raise PatchError(_("bad hunk #%d old text line %d") %
723 raise PatchError(_("bad hunk #%d old text line %d") %
724 (self.number, x))
724 (self.number, x))
725 self.a.append(u)
725 self.a.append(u)
726 self.hunk.append(u)
726 self.hunk.append(u)
727
727
728 l = lr.readline()
728 l = lr.readline()
729 if l.startswith('\ '):
729 if l.startswith('\ '):
730 s = self.a[-1][:-1]
730 s = self.a[-1][:-1]
731 self.a[-1] = s
731 self.a[-1] = s
732 self.hunk[-1] = s
732 self.hunk[-1] = s
733 l = lr.readline()
733 l = lr.readline()
734 m = contextdesc.match(l)
734 m = contextdesc.match(l)
735 if not m:
735 if not m:
736 raise PatchError(_("bad hunk #%d") % self.number)
736 raise PatchError(_("bad hunk #%d") % self.number)
737 foo, self.startb, foo2, bend, foo3 = m.groups()
737 foo, self.startb, foo2, bend, foo3 = m.groups()
738 self.startb = int(self.startb)
738 self.startb = int(self.startb)
739 if bend is None:
739 if bend is None:
740 bend = self.startb
740 bend = self.startb
741 self.lenb = int(bend) - self.startb
741 self.lenb = int(bend) - self.startb
742 if self.startb:
742 if self.startb:
743 self.lenb += 1
743 self.lenb += 1
744 hunki = 1
744 hunki = 1
745 for x in xrange(self.lenb):
745 for x in xrange(self.lenb):
746 l = lr.readline()
746 l = lr.readline()
747 if l.startswith('\ '):
747 if l.startswith('\ '):
748 s = self.b[-1][:-1]
748 s = self.b[-1][:-1]
749 self.b[-1] = s
749 self.b[-1] = s
750 self.hunk[hunki - 1] = s
750 self.hunk[hunki - 1] = s
751 continue
751 continue
752 if not l:
752 if not l:
753 lr.push(l)
753 lr.push(l)
754 break
754 break
755 s = l[2:]
755 s = l[2:]
756 if l.startswith('+ ') or l.startswith('! '):
756 if l.startswith('+ ') or l.startswith('! '):
757 u = '+' + s
757 u = '+' + s
758 elif l.startswith(' '):
758 elif l.startswith(' '):
759 u = ' ' + s
759 u = ' ' + s
760 elif len(self.b) == 0:
760 elif len(self.b) == 0:
761 # this can happen when the hunk does not add any lines
761 # this can happen when the hunk does not add any lines
762 lr.push(l)
762 lr.push(l)
763 break
763 break
764 else:
764 else:
765 raise PatchError(_("bad hunk #%d new text line %d") %
765 raise PatchError(_("bad hunk #%d new text line %d") %
766 (self.number, x))
766 (self.number, x))
767 self.b.append(s)
767 self.b.append(s)
768 while True:
768 while True:
769 if hunki >= len(self.hunk):
769 if hunki >= len(self.hunk):
770 h = ""
770 h = ""
771 else:
771 else:
772 h = self.hunk[hunki]
772 h = self.hunk[hunki]
773 hunki += 1
773 hunki += 1
774 if h == u:
774 if h == u:
775 break
775 break
776 elif h.startswith('-'):
776 elif h.startswith('-'):
777 continue
777 continue
778 else:
778 else:
779 self.hunk.insert(hunki - 1, u)
779 self.hunk.insert(hunki - 1, u)
780 break
780 break
781
781
782 if not self.a:
782 if not self.a:
783 # this happens when lines were only added to the hunk
783 # this happens when lines were only added to the hunk
784 for x in self.hunk:
784 for x in self.hunk:
785 if x.startswith('-') or x.startswith(' '):
785 if x.startswith('-') or x.startswith(' '):
786 self.a.append(x)
786 self.a.append(x)
787 if not self.b:
787 if not self.b:
788 # this happens when lines were only deleted from the hunk
788 # this happens when lines were only deleted from the hunk
789 for x in self.hunk:
789 for x in self.hunk:
790 if x.startswith('+') or x.startswith(' '):
790 if x.startswith('+') or x.startswith(' '):
791 self.b.append(x[1:])
791 self.b.append(x[1:])
792 # @@ -start,len +start,len @@
792 # @@ -start,len +start,len @@
793 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
793 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
794 self.startb, self.lenb)
794 self.startb, self.lenb)
795 self.hunk[0] = self.desc
795 self.hunk[0] = self.desc
796
796
797 def fix_newline(self):
797 def fix_newline(self):
798 diffhelpers.fix_newline(self.hunk, self.a, self.b)
798 diffhelpers.fix_newline(self.hunk, self.a, self.b)
799
799
800 def complete(self):
800 def complete(self):
801 return len(self.a) == self.lena and len(self.b) == self.lenb
801 return len(self.a) == self.lena and len(self.b) == self.lenb
802
802
803 def createfile(self):
803 def createfile(self):
804 return self.starta == 0 and self.lena == 0 and self.create
804 return self.starta == 0 and self.lena == 0 and self.create
805
805
806 def rmfile(self):
806 def rmfile(self):
807 return self.startb == 0 and self.lenb == 0 and self.remove
807 return self.startb == 0 and self.lenb == 0 and self.remove
808
808
809 def fuzzit(self, l, fuzz, toponly):
809 def fuzzit(self, l, fuzz, toponly):
810 # this removes context lines from the top and bottom of list 'l'. It
810 # this removes context lines from the top and bottom of list 'l'. It
811 # checks the hunk to make sure only context lines are removed, and then
811 # checks the hunk to make sure only context lines are removed, and then
812 # returns a new shortened list of lines.
812 # returns a new shortened list of lines.
813 fuzz = min(fuzz, len(l)-1)
813 fuzz = min(fuzz, len(l)-1)
814 if fuzz:
814 if fuzz:
815 top = 0
815 top = 0
816 bot = 0
816 bot = 0
817 hlen = len(self.hunk)
817 hlen = len(self.hunk)
818 for x in xrange(hlen - 1):
818 for x in xrange(hlen - 1):
819 # the hunk starts with the @@ line, so use x+1
819 # the hunk starts with the @@ line, so use x+1
820 if self.hunk[x + 1][0] == ' ':
820 if self.hunk[x + 1][0] == ' ':
821 top += 1
821 top += 1
822 else:
822 else:
823 break
823 break
824 if not toponly:
824 if not toponly:
825 for x in xrange(hlen - 1):
825 for x in xrange(hlen - 1):
826 if self.hunk[hlen - bot - 1][0] == ' ':
826 if self.hunk[hlen - bot - 1][0] == ' ':
827 bot += 1
827 bot += 1
828 else:
828 else:
829 break
829 break
830
830
831 # top and bot now count context in the hunk
831 # top and bot now count context in the hunk
832 # adjust them if either one is short
832 # adjust them if either one is short
833 context = max(top, bot, 3)
833 context = max(top, bot, 3)
834 if bot < context:
834 if bot < context:
835 bot = max(0, fuzz - (context - bot))
835 bot = max(0, fuzz - (context - bot))
836 else:
836 else:
837 bot = min(fuzz, bot)
837 bot = min(fuzz, bot)
838 if top < context:
838 if top < context:
839 top = max(0, fuzz - (context - top))
839 top = max(0, fuzz - (context - top))
840 else:
840 else:
841 top = min(fuzz, top)
841 top = min(fuzz, top)
842
842
843 return l[top:len(l)-bot]
843 return l[top:len(l)-bot]
844 return l
844 return l
845
845
846 def old(self, fuzz=0, toponly=False):
846 def old(self, fuzz=0, toponly=False):
847 return self.fuzzit(self.a, fuzz, toponly)
847 return self.fuzzit(self.a, fuzz, toponly)
848
848
849 def new(self, fuzz=0, toponly=False):
849 def new(self, fuzz=0, toponly=False):
850 return self.fuzzit(self.b, fuzz, toponly)
850 return self.fuzzit(self.b, fuzz, toponly)
851
851
852 class binhunk:
852 class binhunk:
853 'A binary patch file. Only understands literals so far.'
853 'A binary patch file. Only understands literals so far.'
854 def __init__(self, gitpatch):
854 def __init__(self, gitpatch):
855 self.gitpatch = gitpatch
855 self.gitpatch = gitpatch
856 self.text = None
856 self.text = None
857 self.hunk = ['GIT binary patch\n']
857 self.hunk = ['GIT binary patch\n']
858
858
859 def createfile(self):
859 def createfile(self):
860 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
860 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
861
861
862 def rmfile(self):
862 def rmfile(self):
863 return self.gitpatch.op == 'DELETE'
863 return self.gitpatch.op == 'DELETE'
864
864
865 def complete(self):
865 def complete(self):
866 return self.text is not None
866 return self.text is not None
867
867
868 def new(self):
868 def new(self):
869 return [self.text]
869 return [self.text]
870
870
871 def extract(self, lr):
871 def extract(self, lr):
872 line = lr.readline()
872 line = lr.readline()
873 self.hunk.append(line)
873 self.hunk.append(line)
874 while line and not line.startswith('literal '):
874 while line and not line.startswith('literal '):
875 line = lr.readline()
875 line = lr.readline()
876 self.hunk.append(line)
876 self.hunk.append(line)
877 if not line:
877 if not line:
878 raise PatchError(_('could not extract binary patch'))
878 raise PatchError(_('could not extract binary patch'))
879 size = int(line[8:].rstrip())
879 size = int(line[8:].rstrip())
880 dec = []
880 dec = []
881 line = lr.readline()
881 line = lr.readline()
882 self.hunk.append(line)
882 self.hunk.append(line)
883 while len(line) > 1:
883 while len(line) > 1:
884 l = line[0]
884 l = line[0]
885 if l <= 'Z' and l >= 'A':
885 if l <= 'Z' and l >= 'A':
886 l = ord(l) - ord('A') + 1
886 l = ord(l) - ord('A') + 1
887 else:
887 else:
888 l = ord(l) - ord('a') + 27
888 l = ord(l) - ord('a') + 27
889 dec.append(base85.b85decode(line[1:-1])[:l])
889 dec.append(base85.b85decode(line[1:-1])[:l])
890 line = lr.readline()
890 line = lr.readline()
891 self.hunk.append(line)
891 self.hunk.append(line)
892 text = zlib.decompress(''.join(dec))
892 text = zlib.decompress(''.join(dec))
893 if len(text) != size:
893 if len(text) != size:
894 raise PatchError(_('binary patch is %d bytes, not %d') %
894 raise PatchError(_('binary patch is %d bytes, not %d') %
895 (len(text), size))
895 (len(text), size))
896 self.text = text
896 self.text = text
897
897
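The length prefix decoded at the top of each base85 line in extract() is git's convention: 'A'-'Z' stand for 1-26 decoded bytes and 'a'-'z' for 27-52. Two worked values; fmtline() inside b85diff() further down performs the inverse mapping when writing:

print ord('M') - ord('A') + 1     # 13: an 'M' line decodes to 13 bytes
print ord('z') - ord('a') + 27    # 52: the maximum payload per line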
898 def parsefilename(str):
898 def parsefilename(str):
899 # --- filename \t|space stuff
899 # --- filename \t|space stuff
900 s = str[4:].rstrip('\r\n')
900 s = str[4:].rstrip('\r\n')
901 i = s.find('\t')
901 i = s.find('\t')
902 if i < 0:
902 if i < 0:
903 i = s.find(' ')
903 i = s.find(' ')
904 if i < 0:
904 if i < 0:
905 return s
905 return s
906 return s[:i]
906 return s[:i]
907
907
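parsefilename() strips the four-character '--- '/'+++ '/'*** ' prefix and cuts the name at the first tab or space, so timestamps appended by diff are dropped. For example:

print parsefilename('--- a/foo.c\t2010-03-01 12:00:00\n')   # a/foo.c
print parsefilename('+++ b/foo.c\n')                        # b/foo.c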
908 def selectfile(afile_orig, bfile_orig, hunk, strip):
908 def selectfile(afile_orig, bfile_orig, hunk, strip):
909 def pathstrip(path, count=1):
909 def pathstrip(path, count=1):
910 pathlen = len(path)
910 pathlen = len(path)
911 i = 0
911 i = 0
912 if count == 0:
912 if count == 0:
913 return '', path.rstrip()
913 return '', path.rstrip()
914 while count > 0:
914 while count > 0:
915 i = path.find('/', i)
915 i = path.find('/', i)
916 if i == -1:
916 if i == -1:
917 raise PatchError(_("unable to strip away %d dirs from %s") %
917 raise PatchError(_("unable to strip away %d dirs from %s") %
918 (count, path))
918 (count, path))
919 i += 1
919 i += 1
920 # consume '//' in the path
920 # consume '//' in the path
921 while i < pathlen - 1 and path[i] == '/':
921 while i < pathlen - 1 and path[i] == '/':
922 i += 1
922 i += 1
923 count -= 1
923 count -= 1
924 return path[:i].lstrip(), path[i:].rstrip()
924 return path[:i].lstrip(), path[i:].rstrip()
925
925
926 nulla = afile_orig == "/dev/null"
926 nulla = afile_orig == "/dev/null"
927 nullb = bfile_orig == "/dev/null"
927 nullb = bfile_orig == "/dev/null"
928 abase, afile = pathstrip(afile_orig, strip)
928 abase, afile = pathstrip(afile_orig, strip)
929 gooda = not nulla and util.lexists(afile)
929 gooda = not nulla and util.lexists(afile)
930 bbase, bfile = pathstrip(bfile_orig, strip)
930 bbase, bfile = pathstrip(bfile_orig, strip)
931 if afile == bfile:
931 if afile == bfile:
932 goodb = gooda
932 goodb = gooda
933 else:
933 else:
934 goodb = not nullb and os.path.exists(bfile)
934 goodb = not nullb and os.path.exists(bfile)
935 createfunc = hunk.createfile
935 createfunc = hunk.createfile
936 missing = not goodb and not gooda and not createfunc()
936 missing = not goodb and not gooda and not createfunc()
937
937
938 # some diff programs apparently produce create patches where the
938 # some diff programs apparently produce create patches where the
939 # afile is not /dev/null, but afile starts with bfile
939 # afile is not /dev/null, but afile starts with bfile
940 abasedir = afile[:afile.rfind('/') + 1]
940 abasedir = afile[:afile.rfind('/') + 1]
941 bbasedir = bfile[:bfile.rfind('/') + 1]
941 bbasedir = bfile[:bfile.rfind('/') + 1]
942 if missing and abasedir == bbasedir and afile.startswith(bfile):
942 if missing and abasedir == bbasedir and afile.startswith(bfile):
943 # this isn't very pretty
943 # this isn't very pretty
944 hunk.create = True
944 hunk.create = True
945 if createfunc():
945 if createfunc():
946 missing = False
946 missing = False
947 else:
947 else:
948 hunk.create = False
948 hunk.create = False
949
949
950 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
950 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
951 # diff is between a file and its backup. In this case, the original
951 # diff is between a file and its backup. In this case, the original
952 # file should be patched (see original mpatch code).
952 # file should be patched (see original mpatch code).
953 isbackup = (abase == bbase and bfile.startswith(afile))
953 isbackup = (abase == bbase and bfile.startswith(afile))
954 fname = None
954 fname = None
955 if not missing:
955 if not missing:
956 if gooda and goodb:
956 if gooda and goodb:
957 fname = isbackup and afile or bfile
957 fname = isbackup and afile or bfile
958 elif gooda:
958 elif gooda:
959 fname = afile
959 fname = afile
960
960
961 if not fname:
961 if not fname:
962 if not nullb:
962 if not nullb:
963 fname = isbackup and afile or bfile
963 fname = isbackup and afile or bfile
964 elif not nulla:
964 elif not nulla:
965 fname = afile
965 fname = afile
966 else:
966 else:
967 raise PatchError(_("undefined source and destination files"))
967 raise PatchError(_("undefined source and destination files"))
968
968
969 return fname, missing
969 return fname, missing
970
970
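pathstrip() is a closure inside selectfile(), so it cannot be called from outside; the sketch below merely restates its 'patch -pN' stripping semantics on a sample path.

# Behaviour of the inner pathstrip(path, count), shown for illustration:
#   pathstrip('a/b/c/d.txt', 0)  ->  ('',     'a/b/c/d.txt')
#   pathstrip('a/b/c/d.txt', 1)  ->  ('a/',   'b/c/d.txt')
#   pathstrip('a/b/c/d.txt', 2)  ->  ('a/b/', 'c/d.txt')
# The stripped base (first element) is what selectfile() compares through
# 'abase == bbase' to detect a diff between a file and its backup.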
971 def scangitpatch(lr, firstline):
971 def scangitpatch(lr, firstline):
972 """
972 """
973 Git patches can emit:
973 Git patches can emit:
974 - rename a to b
974 - rename a to b
975 - change b
975 - change b
976 - copy a to c
976 - copy a to c
977 - change c
977 - change c
978
978
979 We cannot apply this sequence as-is, the renamed 'a' could not be
979 We cannot apply this sequence as-is, the renamed 'a' could not be
980 found for it would have been renamed already. And we cannot copy
980 found for it would have been renamed already. And we cannot copy
981 from 'b' instead because 'b' would have been changed already. So
981 from 'b' instead because 'b' would have been changed already. So
982 we scan the git patch for copy and rename commands so we can
982 we scan the git patch for copy and rename commands so we can
983 perform the copies ahead of time.
983 perform the copies ahead of time.
984 """
984 """
985 pos = 0
985 pos = 0
986 try:
986 try:
987 pos = lr.fp.tell()
987 pos = lr.fp.tell()
988 fp = lr.fp
988 fp = lr.fp
989 except IOError:
989 except IOError:
990 fp = cStringIO.StringIO(lr.fp.read())
990 fp = cStringIO.StringIO(lr.fp.read())
991 gitlr = linereader(fp, lr.textmode)
991 gitlr = linereader(fp, lr.textmode)
992 gitlr.push(firstline)
992 gitlr.push(firstline)
993 (dopatch, gitpatches) = readgitpatch(gitlr)
993 (dopatch, gitpatches) = readgitpatch(gitlr)
994 fp.seek(pos)
994 fp.seek(pos)
995 return dopatch, gitpatches
995 return dopatch, gitpatches
996
996
997 def iterhunks(ui, fp, sourcefile=None):
997 def iterhunks(ui, fp, sourcefile=None):
998 """Read a patch and yield the following events:
998 """Read a patch and yield the following events:
999 - ("file", afile, bfile, firsthunk): select a new target file.
999 - ("file", afile, bfile, firsthunk): select a new target file.
1000 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1000 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1001 "file" event.
1001 "file" event.
1002 - ("git", gitchanges): current diff is in git format, gitchanges
1002 - ("git", gitchanges): current diff is in git format, gitchanges
1003 maps filenames to gitpatch records. Unique event.
1003 maps filenames to gitpatch records. Unique event.
1004 """
1004 """
1005 changed = {}
1005 changed = {}
1006 current_hunk = None
1006 current_hunk = None
1007 afile = ""
1007 afile = ""
1008 bfile = ""
1008 bfile = ""
1009 state = None
1009 state = None
1010 hunknum = 0
1010 hunknum = 0
1011 emitfile = False
1011 emitfile = False
1012 git = False
1012 git = False
1013
1013
1014 # our states
1014 # our states
1015 BFILE = 1
1015 BFILE = 1
1016 context = None
1016 context = None
1017 lr = linereader(fp)
1017 lr = linereader(fp)
1018 # gitworkdone is True if a git operation (copy, rename, ...) was
1018 # gitworkdone is True if a git operation (copy, rename, ...) was
1019 # performed already for the current file. Useful when the file
1019 # performed already for the current file. Useful when the file
1020 # section may have no hunk.
1020 # section may have no hunk.
1021 gitworkdone = False
1021 gitworkdone = False
1022 empty = None
1022 empty = None
1023
1023
1024 while True:
1024 while True:
1025 newfile = newgitfile = False
1025 newfile = newgitfile = False
1026 x = lr.readline()
1026 x = lr.readline()
1027 if not x:
1027 if not x:
1028 break
1028 break
1029 if current_hunk:
1029 if current_hunk:
1030 if x.startswith('\ '):
1030 if x.startswith('\ '):
1031 current_hunk.fix_newline()
1031 current_hunk.fix_newline()
1032 yield 'hunk', current_hunk
1032 yield 'hunk', current_hunk
1033 current_hunk = None
1033 current_hunk = None
1034 empty = False
1034 empty = False
1035 if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
1035 if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
1036 ((context is not False) and x.startswith('***************')))):
1036 ((context is not False) and x.startswith('***************')))):
1037 try:
1037 try:
1038 if context is None and x.startswith('***************'):
1038 if context is None and x.startswith('***************'):
1039 context = True
1039 context = True
1040 gpatch = changed.get(bfile)
1040 gpatch = changed.get(bfile)
1041 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1041 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1042 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1042 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1043 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
1043 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
1044 except PatchError, err:
1044 except PatchError, err:
1045 ui.debug(err)
1045 ui.debug(err)
1046 current_hunk = None
1046 current_hunk = None
1047 continue
1047 continue
1048 hunknum += 1
1048 hunknum += 1
1049 if emitfile:
1049 if emitfile:
1050 emitfile = False
1050 emitfile = False
1051 yield 'file', (afile, bfile, current_hunk)
1051 yield 'file', (afile, bfile, current_hunk)
1052 empty = False
1052 empty = False
1053 elif state == BFILE and x.startswith('GIT binary patch'):
1053 elif state == BFILE and x.startswith('GIT binary patch'):
1054 current_hunk = binhunk(changed[bfile])
1054 current_hunk = binhunk(changed[bfile])
1055 hunknum += 1
1055 hunknum += 1
1056 if emitfile:
1056 if emitfile:
1057 emitfile = False
1057 emitfile = False
1058 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
1058 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
1059 empty = False
1059 empty = False
1060 current_hunk.extract(lr)
1060 current_hunk.extract(lr)
1061 elif x.startswith('diff --git'):
1061 elif x.startswith('diff --git'):
1062 # check for git diff, scanning the whole patch file if needed
1062 # check for git diff, scanning the whole patch file if needed
1063 m = gitre.match(x)
1063 m = gitre.match(x)
1064 gitworkdone = False
1064 gitworkdone = False
1065 if m:
1065 if m:
1066 afile, bfile = m.group(1, 2)
1066 afile, bfile = m.group(1, 2)
1067 if not git:
1067 if not git:
1068 git = True
1068 git = True
1069 gitpatches = scangitpatch(lr, x)[1]
1069 gitpatches = scangitpatch(lr, x)[1]
1070 yield 'git', gitpatches
1070 yield 'git', gitpatches
1071 for gp in gitpatches:
1071 for gp in gitpatches:
1072 changed[gp.path] = gp
1072 changed[gp.path] = gp
1073 # else error?
1073 # else error?
1074 # copy/rename + modify should modify target, not source
1074 # copy/rename + modify should modify target, not source
1075 gp = changed.get(bfile)
1075 gp = changed.get(bfile)
1076 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1076 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1077 or gp.mode):
1077 or gp.mode):
1078 afile = bfile
1078 afile = bfile
1079 gitworkdone = True
1079 gitworkdone = True
1080 newgitfile = True
1080 newgitfile = True
1081 elif x.startswith('---'):
1081 elif x.startswith('---'):
1082 # check for a unified diff
1082 # check for a unified diff
1083 l2 = lr.readline()
1083 l2 = lr.readline()
1084 if not l2.startswith('+++'):
1084 if not l2.startswith('+++'):
1085 lr.push(l2)
1085 lr.push(l2)
1086 continue
1086 continue
1087 newfile = True
1087 newfile = True
1088 context = False
1088 context = False
1089 afile = parsefilename(x)
1089 afile = parsefilename(x)
1090 bfile = parsefilename(l2)
1090 bfile = parsefilename(l2)
1091 elif x.startswith('***'):
1091 elif x.startswith('***'):
1092 # check for a context diff
1092 # check for a context diff
1093 l2 = lr.readline()
1093 l2 = lr.readline()
1094 if not l2.startswith('---'):
1094 if not l2.startswith('---'):
1095 lr.push(l2)
1095 lr.push(l2)
1096 continue
1096 continue
1097 l3 = lr.readline()
1097 l3 = lr.readline()
1098 lr.push(l3)
1098 lr.push(l3)
1099 if not l3.startswith("***************"):
1099 if not l3.startswith("***************"):
1100 lr.push(l2)
1100 lr.push(l2)
1101 continue
1101 continue
1102 newfile = True
1102 newfile = True
1103 context = True
1103 context = True
1104 afile = parsefilename(x)
1104 afile = parsefilename(x)
1105 bfile = parsefilename(l2)
1105 bfile = parsefilename(l2)
1106
1106
1107 if newfile:
1107 if newfile:
1108 if empty:
1108 if empty:
1109 raise NoHunks
1109 raise NoHunks
1110 empty = not gitworkdone
1110 empty = not gitworkdone
1111 gitworkdone = False
1111 gitworkdone = False
1112
1112
1113 if newgitfile or newfile:
1113 if newgitfile or newfile:
1114 emitfile = True
1114 emitfile = True
1115 state = BFILE
1115 state = BFILE
1116 hunknum = 0
1116 hunknum = 0
1117 if current_hunk:
1117 if current_hunk:
1118 if current_hunk.complete():
1118 if current_hunk.complete():
1119 yield 'hunk', current_hunk
1119 yield 'hunk', current_hunk
1120 empty = False
1120 empty = False
1121 else:
1121 else:
1122 raise PatchError(_("malformed patch %s %s") % (afile,
1122 raise PatchError(_("malformed patch %s %s") % (afile,
1123 current_hunk.desc))
1123 current_hunk.desc))
1124
1124
1125 if (empty is None and not gitworkdone) or empty:
1125 if (empty is None and not gitworkdone) or empty:
1126 raise NoHunks
1126 raise NoHunks
1127
1127
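applydiff() below is the real consumer, but the event stream can also be inspected directly. A hedged sketch for a one-hunk unified diff, assuming a 'ui' object is at hand (cStringIO is already used by this module):

diff = ('--- a/hello.txt\n'
        '+++ b/hello.txt\n'
        '@@ -1,1 +1,1 @@\n'
        '-hello\n'
        '+hello, world\n')
for state, values in iterhunks(ui, cStringIO.StringIO(diff)):
    print state          # prints 'file', then 'hunk'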
1128 def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'):
1128 def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'):
1129 """
1129 """
1130 Reads a patch from fp and tries to apply it.
1130 Reads a patch from fp and tries to apply it.
1131
1131
1132 The dict 'changed' is filled in with all of the filenames changed
1132 The dict 'changed' is filled in with all of the filenames changed
1133 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1133 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1134 found and 1 if there was any fuzz.
1134 found and 1 if there was any fuzz.
1135
1135
1136 If 'eolmode' is 'strict', the patch content and patched file are
1136 If 'eolmode' is 'strict', the patch content and patched file are
1137 read in binary mode. Otherwise, line endings are ignored when
1137 read in binary mode. Otherwise, line endings are ignored when
1138 patching then normalized according to 'eolmode'.
1138 patching then normalized according to 'eolmode'.
1139 """
1139 """
1140 rejects = 0
1140 rejects = 0
1141 err = 0
1141 err = 0
1142 current_file = None
1142 current_file = None
1143 gitpatches = None
1143 gitpatches = None
1144 opener = util.opener(os.getcwd())
1144 opener = util.opener(os.getcwd())
1145
1145
1146 def closefile():
1146 def closefile():
1147 if not current_file:
1147 if not current_file:
1148 return 0
1148 return 0
1149 current_file.close()
1149 current_file.close()
1150 return len(current_file.rej)
1150 return len(current_file.rej)
1151
1151
1152 for state, values in iterhunks(ui, fp, sourcefile):
1152 for state, values in iterhunks(ui, fp, sourcefile):
1153 if state == 'hunk':
1153 if state == 'hunk':
1154 if not current_file:
1154 if not current_file:
1155 continue
1155 continue
1156 current_hunk = values
1156 current_hunk = values
1157 ret = current_file.apply(current_hunk)
1157 ret = current_file.apply(current_hunk)
1158 if ret >= 0:
1158 if ret >= 0:
1159 changed.setdefault(current_file.fname, None)
1159 changed.setdefault(current_file.fname, None)
1160 if ret > 0:
1160 if ret > 0:
1161 err = 1
1161 err = 1
1162 elif state == 'file':
1162 elif state == 'file':
1163 rejects += closefile()
1163 rejects += closefile()
1164 afile, bfile, first_hunk = values
1164 afile, bfile, first_hunk = values
1165 try:
1165 try:
1166 if sourcefile:
1166 if sourcefile:
1167 current_file = patchfile(ui, sourcefile, opener,
1167 current_file = patchfile(ui, sourcefile, opener,
1168 eolmode=eolmode)
1168 eolmode=eolmode)
1169 else:
1169 else:
1170 current_file, missing = selectfile(afile, bfile,
1170 current_file, missing = selectfile(afile, bfile,
1171 first_hunk, strip)
1171 first_hunk, strip)
1172 current_file = patchfile(ui, current_file, opener,
1172 current_file = patchfile(ui, current_file, opener,
1173 missing, eolmode)
1173 missing, eolmode)
1174 except PatchError, err:
1174 except PatchError, err:
1175 ui.warn(str(err) + '\n')
1175 ui.warn(str(err) + '\n')
1176 current_file, current_hunk = None, None
1176 current_file, current_hunk = None, None
1177 rejects += 1
1177 rejects += 1
1178 continue
1178 continue
1179 elif state == 'git':
1179 elif state == 'git':
1180 gitpatches = values
1180 gitpatches = values
1181 cwd = os.getcwd()
1181 cwd = os.getcwd()
1182 for gp in gitpatches:
1182 for gp in gitpatches:
1183 if gp.op in ('COPY', 'RENAME'):
1183 if gp.op in ('COPY', 'RENAME'):
1184 copyfile(gp.oldpath, gp.path, cwd)
1184 copyfile(gp.oldpath, gp.path, cwd)
1185 changed[gp.path] = gp
1185 changed[gp.path] = gp
1186 else:
1186 else:
1187 raise util.Abort(_('unsupported parser state: %s') % state)
1187 raise util.Abort(_('unsupported parser state: %s') % state)
1188
1188
1189 rejects += closefile()
1189 rejects += closefile()
1190
1190
1191 if rejects:
1191 if rejects:
1192 return -1
1192 return -1
1193 return err
1193 return err
1194
1194
1195 def updatedir(ui, repo, patches, similarity=0):
1195 def updatedir(ui, repo, patches, similarity=0):
1196 '''Update dirstate after patch application according to metadata'''
1196 '''Update dirstate after patch application according to metadata'''
1197 if not patches:
1197 if not patches:
1198 return
1198 return
1199 copies = []
1199 copies = []
1200 removes = set()
1200 removes = set()
1201 cfiles = patches.keys()
1201 cfiles = patches.keys()
1202 cwd = repo.getcwd()
1202 cwd = repo.getcwd()
1203 if cwd:
1203 if cwd:
1204 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1204 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1205 for f in patches:
1205 for f in patches:
1206 gp = patches[f]
1206 gp = patches[f]
1207 if not gp:
1207 if not gp:
1208 continue
1208 continue
1209 if gp.op == 'RENAME':
1209 if gp.op == 'RENAME':
1210 copies.append((gp.oldpath, gp.path))
1210 copies.append((gp.oldpath, gp.path))
1211 removes.add(gp.oldpath)
1211 removes.add(gp.oldpath)
1212 elif gp.op == 'COPY':
1212 elif gp.op == 'COPY':
1213 copies.append((gp.oldpath, gp.path))
1213 copies.append((gp.oldpath, gp.path))
1214 elif gp.op == 'DELETE':
1214 elif gp.op == 'DELETE':
1215 removes.add(gp.path)
1215 removes.add(gp.path)
1216 for src, dst in copies:
1216 for src, dst in copies:
1217 repo.copy(src, dst)
1217 repo.copy(src, dst)
1218 if (not similarity) and removes:
1218 if (not similarity) and removes:
1219 repo.remove(sorted(removes), True)
1219 repo.remove(sorted(removes), True)
1220 for f in patches:
1220 for f in patches:
1221 gp = patches[f]
1221 gp = patches[f]
1222 if gp and gp.mode:
1222 if gp and gp.mode:
1223 islink, isexec = gp.mode
1223 islink, isexec = gp.mode
1224 dst = repo.wjoin(gp.path)
1224 dst = repo.wjoin(gp.path)
1225 # patch won't create empty files
1225 # patch won't create empty files
1226 if gp.op == 'ADD' and not os.path.exists(dst):
1226 if gp.op == 'ADD' and not os.path.exists(dst):
1227 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1227 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1228 repo.wwrite(gp.path, '', flags)
1228 repo.wwrite(gp.path, '', flags)
1229 elif gp.op != 'DELETE':
1229 elif gp.op != 'DELETE':
1230 util.set_flags(dst, islink, isexec)
1230 util.set_flags(dst, islink, isexec)
1231 cmdutil.addremove(repo, cfiles, similarity=similarity)
1231 cmdutil.addremove(repo, cfiles, similarity=similarity)
1232 files = patches.keys()
1232 files = patches.keys()
1233 files.extend([r for r in removes if r not in files])
1233 files.extend([r for r in removes if r not in files])
1234 return sorted(files)
1234 return sorted(files)
1235
1235
1236 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1236 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1237 """use <patcher> to apply <patchname> to the working directory.
1237 """use <patcher> to apply <patchname> to the working directory.
1238 returns whether patch was applied with fuzz factor."""
1238 returns whether patch was applied with fuzz factor."""
1239
1239
1240 fuzz = False
1240 fuzz = False
1241 if cwd:
1241 if cwd:
1242 args.append('-d %s' % util.shellquote(cwd))
1242 args.append('-d %s' % util.shellquote(cwd))
1243 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1243 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1244 util.shellquote(patchname)))
1244 util.shellquote(patchname)))
1245
1245
1246 for line in fp:
1246 for line in fp:
1247 line = line.rstrip()
1247 line = line.rstrip()
1248 ui.note(line + '\n')
1248 ui.note(line + '\n')
1249 if line.startswith('patching file '):
1249 if line.startswith('patching file '):
1250 pf = util.parse_patch_output(line)
1250 pf = util.parse_patch_output(line)
1251 printed_file = False
1251 printed_file = False
1252 files.setdefault(pf, None)
1252 files.setdefault(pf, None)
1253 elif line.find('with fuzz') >= 0:
1253 elif line.find('with fuzz') >= 0:
1254 fuzz = True
1254 fuzz = True
1255 if not printed_file:
1255 if not printed_file:
1256 ui.warn(pf + '\n')
1256 ui.warn(pf + '\n')
1257 printed_file = True
1257 printed_file = True
1258 ui.warn(line + '\n')
1258 ui.warn(line + '\n')
1259 elif line.find('saving rejects to file') >= 0:
1259 elif line.find('saving rejects to file') >= 0:
1260 ui.warn(line + '\n')
1260 ui.warn(line + '\n')
1261 elif line.find('FAILED') >= 0:
1261 elif line.find('FAILED') >= 0:
1262 if not printed_file:
1262 if not printed_file:
1263 ui.warn(pf + '\n')
1263 ui.warn(pf + '\n')
1264 printed_file = True
1264 printed_file = True
1265 ui.warn(line + '\n')
1265 ui.warn(line + '\n')
1266 code = fp.close()
1266 code = fp.close()
1267 if code:
1267 if code:
1268 raise PatchError(_("patch command failed: %s") %
1268 raise PatchError(_("patch command failed: %s") %
1269 util.explain_exit(code)[0])
1269 util.explain_exit(code)[0])
1270 return fuzz
1270 return fuzz
1271
1271
1272 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1272 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1273 """use builtin patch to apply <patchobj> to the working directory.
1273 """use builtin patch to apply <patchobj> to the working directory.
1274 returns whether patch was applied with fuzz factor."""
1274 returns whether patch was applied with fuzz factor."""
1275
1275
1276 if files is None:
1276 if files is None:
1277 files = {}
1277 files = {}
1278 if eolmode is None:
1278 if eolmode is None:
1279 eolmode = ui.config('patch', 'eol', 'strict')
1279 eolmode = ui.config('patch', 'eol', 'strict')
1280 if eolmode.lower() not in eolmodes:
1280 if eolmode.lower() not in eolmodes:
1281 raise util.Abort(_('Unsupported line endings type: %s') % eolmode)
1281 raise util.Abort(_('Unsupported line endings type: %s') % eolmode)
1282 eolmode = eolmode.lower()
1282 eolmode = eolmode.lower()
1283
1283
1284 try:
1284 try:
1285 fp = open(patchobj, 'rb')
1285 fp = open(patchobj, 'rb')
1286 except TypeError:
1286 except TypeError:
1287 fp = patchobj
1287 fp = patchobj
1288 if cwd:
1288 if cwd:
1289 curdir = os.getcwd()
1289 curdir = os.getcwd()
1290 os.chdir(cwd)
1290 os.chdir(cwd)
1291 try:
1291 try:
1292 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1292 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1293 finally:
1293 finally:
1294 if cwd:
1294 if cwd:
1295 os.chdir(curdir)
1295 os.chdir(curdir)
1296 if fp != patchobj:
1296 if fp != patchobj:
1297 fp.close()
1297 fp.close()
1298 if ret < 0:
1298 if ret < 0:
1299 raise PatchError
1299 raise PatchError
1300 return ret > 0
1300 return ret > 0
1301
1301
1302 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1302 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1303 """Apply <patchname> to the working directory.
1303 """Apply <patchname> to the working directory.
1304
1304
1305 'eolmode' specifies how end of lines should be handled. It can be:
1305 'eolmode' specifies how end of lines should be handled. It can be:
1306 - 'strict': inputs are read in binary mode, EOLs are preserved
1306 - 'strict': inputs are read in binary mode, EOLs are preserved
1307 - 'crlf': EOLs are ignored when patching and reset to CRLF
1307 - 'crlf': EOLs are ignored when patching and reset to CRLF
1308 - 'lf': EOLs are ignored when patching and reset to LF
1308 - 'lf': EOLs are ignored when patching and reset to LF
1309 - None: get it from user settings, default to 'strict'
1309 - None: get it from user settings, default to 'strict'
1310 'eolmode' is ignored when using an external patcher program.
1310 'eolmode' is ignored when using an external patcher program.
1311
1311
1312 Returns whether patch was applied with fuzz factor.
1312 Returns whether patch was applied with fuzz factor.
1313 """
1313 """
1314 patcher = ui.config('ui', 'patch')
1314 patcher = ui.config('ui', 'patch')
1315 args = []
1315 args = []
1316 if files is None:
1316 if files is None:
1317 files = {}
1317 files = {}
1318 try:
1318 try:
1319 if patcher:
1319 if patcher:
1320 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1320 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1321 files)
1321 files)
1322 else:
1322 else:
1323 try:
1323 try:
1324 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1324 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1325 except NoHunks:
1325 except NoHunks:
1326 ui.warn(_('internal patcher failed\n'
1326 ui.warn(_('internal patcher failed\n'
1327 'please report details to '
1327 'please report details to '
1328 'http://mercurial.selenic.com/bts/\n'
1328 'http://mercurial.selenic.com/bts/\n'
1329 'or mercurial@selenic.com\n'))
1329 'or mercurial@selenic.com\n'))
1330 patcher = (util.find_exe('gpatch') or util.find_exe('patch')
1330 patcher = (util.find_exe('gpatch') or util.find_exe('patch')
1331 or 'patch')
1331 or 'patch')
1332 ui.debug('no valid hunks found; trying with %r instead\n' %
1332 ui.debug('no valid hunks found; trying with %r instead\n' %
1333 patcher)
1333 patcher)
1334 if util.needbinarypatch():
1334 if util.needbinarypatch():
1335 args.append('--binary')
1335 args.append('--binary')
1336 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1336 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1337 files)
1337 files)
1338 except PatchError, err:
1338 except PatchError, err:
1339 s = str(err)
1339 s = str(err)
1340 if s:
1340 if s:
1341 raise util.Abort(s)
1341 raise util.Abort(s)
1342 else:
1342 else:
1343 raise util.Abort(_('patch failed to apply'))
1343 raise util.Abort(_('patch failed to apply'))
1344
1344
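patch() is the entry point most callers use: it prefers an external program configured as ui.patch, falls back to the internal patcher, and retries with gpatch/patch when no hunks were recognized. A hedged example of a call site; the patch file path and the repo/ui objects are assumptions, not taken from this changeset:

    # hypothetical caller; 'repo' and 'ui' are assumed to already exist
    try:
        fuzz = patch('/tmp/fix.diff', ui, strip=1, cwd=repo.root, eolmode=None)
        if fuzz:
            ui.warn('patch applied with fuzz\n')
    except util.Abort, err:
        ui.warn('apply failed: %s\n' % err)
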
1345 def b85diff(to, tn):
1345 def b85diff(to, tn):
1346 '''print base85-encoded binary diff'''
1346 '''print base85-encoded binary diff'''
1347 def gitindex(text):
1347 def gitindex(text):
1348 if not text:
1348 if not text:
1349 return '0' * 40
1349 return '0' * 40
1350 l = len(text)
1350 l = len(text)
1351 s = util.sha1('blob %d\0' % l)
1351 s = util.sha1('blob %d\0' % l)
1352 s.update(text)
1352 s.update(text)
1353 return s.hexdigest()
1353 return s.hexdigest()
1354
1354
1355 def fmtline(line):
1355 def fmtline(line):
1356 l = len(line)
1356 l = len(line)
1357 if l <= 26:
1357 if l <= 26:
1358 l = chr(ord('A') + l - 1)
1358 l = chr(ord('A') + l - 1)
1359 else:
1359 else:
1360 l = chr(l - 26 + ord('a') - 1)
1360 l = chr(l - 26 + ord('a') - 1)
1361 return '%c%s\n' % (l, base85.b85encode(line, True))
1361 return '%c%s\n' % (l, base85.b85encode(line, True))
1362
1362
1363 def chunk(text, csize=52):
1363 def chunk(text, csize=52):
1364 l = len(text)
1364 l = len(text)
1365 i = 0
1365 i = 0
1366 while i < l:
1366 while i < l:
1367 yield text[i:i + csize]
1367 yield text[i:i + csize]
1368 i += csize
1368 i += csize
1369
1369
1370 tohash = gitindex(to)
1370 tohash = gitindex(to)
1371 tnhash = gitindex(tn)
1371 tnhash = gitindex(tn)
1372 if tohash == tnhash:
1372 if tohash == tnhash:
1373 return ""
1373 return ""
1374
1374
1375 # TODO: deltas
1375 # TODO: deltas
1376 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1376 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1377 (tohash, tnhash, len(tn))]
1377 (tohash, tnhash, len(tn))]
1378 for l in chunk(zlib.compress(tn)):
1378 for l in chunk(zlib.compress(tn)):
1379 ret.append(fmtline(l))
1379 ret.append(fmtline(l))
1380 ret.append('\n')
1380 ret.append('\n')
1381 return ''.join(ret)
1381 return ''.join(ret)
1382
1382
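b85diff() emits the "GIT binary patch" literal format: gitindex() is the standard git blob id (sha1 of 'blob <len>\0' plus the contents), and fmtline() prefixes every base85 line with one character encoding its length, 'A'-'Z' for 1-26 bytes and 'a'-'z' for 27-52. Two small standalone checks of those pieces (base85 itself is Mercurial's C extension and is not reproduced here):

    import hashlib

    def gitblobid(data):
        # hashlib-based equivalent of gitindex() above, which special-cases
        # empty input to forty '0' characters
        if not data:
            return '0' * 40
        return hashlib.sha1('blob %d\0' % len(data) + data).hexdigest()

    def lenchar(n):
        # the length prefix written by fmtline() above (chunk sizes 1..52)
        if n <= 26:
            return chr(ord('A') + n - 1)
        return chr(n - 26 + ord('a') - 1)

    assert lenchar(1) == 'A' and lenchar(26) == 'Z'
    assert lenchar(27) == 'a' and lenchar(52) == 'z'
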
1383 class GitDiffRequired(Exception):
1383 class GitDiffRequired(Exception):
1384 pass
1384 pass
1385
1385
1386 def diffopts(ui, opts=None, untrusted=False):
1386 def diffopts(ui, opts=None, untrusted=False):
1387 def get(key, name=None, getter=ui.configbool):
1387 def get(key, name=None, getter=ui.configbool):
1388 return ((opts and opts.get(key)) or
1388 return ((opts and opts.get(key)) or
1389 getter('diff', name or key, None, untrusted=untrusted))
1389 getter('diff', name or key, None, untrusted=untrusted))
1390 return mdiff.diffopts(
1390 return mdiff.diffopts(
1391 text=opts and opts.get('text'),
1391 text=opts and opts.get('text'),
1392 git=get('git'),
1392 git=get('git'),
1393 nodates=get('nodates'),
1393 nodates=get('nodates'),
1394 showfunc=get('show_function', 'showfunc'),
1394 showfunc=get('show_function', 'showfunc'),
1395 ignorews=get('ignore_all_space', 'ignorews'),
1395 ignorews=get('ignore_all_space', 'ignorews'),
1396 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1396 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1397 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1397 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1398 context=get('unified', getter=ui.config))
1398 context=get('unified', getter=ui.config))
1399
1399
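diffopts() gives explicit command-line options precedence and otherwise reads the [diff] section of the configuration, using ui.configbool for everything except 'unified'. A hedged example of the corresponding hgrc section; the key names match the get() calls above, the values shown are illustrative:

    [diff]
    git = True
    nodates = False
    showfunc = True
    ignorews = False
    ignorewsamount = False
    ignoreblanklines = False
    unified = 5
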
1400 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1400 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1401 losedatafn=None):
1401 losedatafn=None):
1402 '''yields diff of changes to files between two nodes, or node and
1402 '''yields diff of changes to files between two nodes, or node and
1403 working directory.
1403 working directory.
1404
1404
1405 if node1 is None, use first dirstate parent instead.
1405 if node1 is None, use first dirstate parent instead.
1406 if node2 is None, compare node1 with working directory.
1406 if node2 is None, compare node1 with working directory.
1407
1407
1408 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1408 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1409 every time some change cannot be represented with the current
1409 every time some change cannot be represented with the current
1410 patch format. Return False to upgrade to git patch format, True to
1410 patch format. Return False to upgrade to git patch format, True to
1411 accept the loss or raise an exception to abort the diff. It is
1411 accept the loss or raise an exception to abort the diff. It is
1412 called with the name of the current file being diffed as 'fn'. If set
1412 called with the name of the current file being diffed as 'fn'. If set
1413 to None, patches will always be upgraded to git format when
1413 to None, patches will always be upgraded to git format when
1414 necessary.
1414 necessary.
1415 '''
1415 '''
1416
1416
1417 if opts is None:
1417 if opts is None:
1418 opts = mdiff.defaultopts
1418 opts = mdiff.defaultopts
1419
1419
1420 if not node1 and not node2:
1420 if not node1 and not node2:
1421 node1 = repo.dirstate.parents()[0]
1421 node1 = repo.dirstate.parents()[0]
1422
1422
1423 def lrugetfilectx():
1423 def lrugetfilectx():
1424 cache = {}
1424 cache = {}
1425 order = []
1425 order = []
1426 def getfilectx(f, ctx):
1426 def getfilectx(f, ctx):
1427 fctx = ctx.filectx(f, filelog=cache.get(f))
1427 fctx = ctx.filectx(f, filelog=cache.get(f))
1428 if f not in cache:
1428 if f not in cache:
1429 if len(cache) > 20:
1429 if len(cache) > 20:
1430 del cache[order.pop(0)]
1430 del cache[order.pop(0)]
1431 cache[f] = fctx.filelog()
1431 cache[f] = fctx.filelog()
1432 else:
1432 else:
1433 order.remove(f)
1433 order.remove(f)
1434 order.append(f)
1434 order.append(f)
1435 return fctx
1435 return fctx
1436 return getfilectx
1436 return getfilectx
1437 getfilectx = lrugetfilectx()
1437 getfilectx = lrugetfilectx()
1438
1438
1439 ctx1 = repo[node1]
1439 ctx1 = repo[node1]
1440 ctx2 = repo[node2]
1440 ctx2 = repo[node2]
1441
1441
1442 if not changes:
1442 if not changes:
1443 changes = repo.status(ctx1, ctx2, match=match)
1443 changes = repo.status(ctx1, ctx2, match=match)
1444 modified, added, removed = changes[:3]
1444 modified, added, removed = changes[:3]
1445
1445
1446 if not modified and not added and not removed:
1446 if not modified and not added and not removed:
1447 return []
1447 return []
1448
1448
1449 revs = None
1449 revs = None
1450 if not repo.ui.quiet:
1450 if not repo.ui.quiet:
1451 hexfunc = repo.ui.debugflag and hex or short
1451 hexfunc = repo.ui.debugflag and hex or short
1452 revs = [hexfunc(node) for node in [node1, node2] if node]
1452 revs = [hexfunc(node) for node in [node1, node2] if node]
1453
1453
1454 copy = {}
1454 copy = {}
1455 if opts.git or opts.upgrade:
1455 if opts.git or opts.upgrade:
1456 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1456 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1457
1457
1458 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1458 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1459 modified, added, removed, copy, getfilectx, opts, losedata)
1459 modified, added, removed, copy, getfilectx, opts, losedata)
1460 if opts.upgrade and not opts.git:
1460 if opts.upgrade and not opts.git:
1461 try:
1461 try:
1462 def losedata(fn):
1462 def losedata(fn):
1463 if not losedatafn or not losedatafn(fn=fn):
1463 if not losedatafn or not losedatafn(fn=fn):
1464 raise GitDiffRequired()
1464 raise GitDiffRequired()
1465 # Buffer the whole output until we are sure it can be generated
1465 # Buffer the whole output until we are sure it can be generated
1466 return list(difffn(opts.copy(git=False), losedata))
1466 return list(difffn(opts.copy(git=False), losedata))
1467 except GitDiffRequired:
1467 except GitDiffRequired:
1468 return difffn(opts.copy(git=True), None)
1468 return difffn(opts.copy(git=True), None)
1469 else:
1469 else:
1470 return difffn(opts, None)
1470 return difffn(opts, None)
1471
1471
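Per the diff() docstring above, losedatafn only matters when opts.upgrade is set: trydiff() calls it whenever the plain format would drop information (binary contents, mode changes, copies and renames, empty-file adds or deletes), and the callback chooses between upgrading to git patches (return False), accepting the loss (return True), or aborting (raise). A hedged sketch of such a callback; the .tmp policy and the commented call site are illustrative assumptions:

    def losedata(fn=None):
        # accept the loss for scratch files, upgrade everything else
        if fn and fn.endswith('.tmp'):
            return True
        return False

    # hypothetical call site (assumes mdiff.diffopts accepts upgrade=True):
    # chunks = diff(repo, opts=mdiff.diffopts(upgrade=True), losedatafn=losedata)
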
1472 def difflabel(func, *args, **kw):
1472 def difflabel(func, *args, **kw):
1473 '''yields 2-tuples of (output, label) based on the output of func()'''
1473 '''yields 2-tuples of (output, label) based on the output of func()'''
1474 prefixes = [('diff', 'diff.diffline'),
1474 prefixes = [('diff', 'diff.diffline'),
1475 ('copy', 'diff.extended'),
1475 ('copy', 'diff.extended'),
1476 ('rename', 'diff.extended'),
1476 ('rename', 'diff.extended'),
1477 ('old', 'diff.extended'),
1477 ('old', 'diff.extended'),
1478 ('new', 'diff.extended'),
1478 ('new', 'diff.extended'),
1479 ('deleted', 'diff.extended'),
1479 ('deleted', 'diff.extended'),
1480 ('---', 'diff.file_a'),
1480 ('---', 'diff.file_a'),
1481 ('+++', 'diff.file_b'),
1481 ('+++', 'diff.file_b'),
1482 ('@@', 'diff.hunk'),
1482 ('@@', 'diff.hunk'),
1483 ('-', 'diff.deleted'),
1483 ('-', 'diff.deleted'),
1484 ('+', 'diff.inserted')]
1484 ('+', 'diff.inserted')]
1485
1485
1486 for chunk in func(*args, **kw):
1486 for chunk in func(*args, **kw):
1487 lines = chunk.split('\n')
1487 lines = chunk.split('\n')
1488 for i, line in enumerate(lines):
1488 for i, line in enumerate(lines):
1489 if i != 0:
1489 if i != 0:
1490 yield ('\n', '')
1490 yield ('\n', '')
1491 stripline = line
1491 stripline = line
1492 if line and line[0] in '+-':
1492 if line and line[0] in '+-':
1493 # highlight trailing whitespace, but only in changed lines
1493 # highlight trailing whitespace, but only in changed lines
1494 stripline = line.rstrip()
1494 stripline = line.rstrip()
1495 for prefix, label in prefixes:
1495 for prefix, label in prefixes:
1496 if stripline.startswith(prefix):
1496 if stripline.startswith(prefix):
1497 yield (stripline, label)
1497 yield (stripline, label)
1498 break
1498 break
1499 else:
1499 else:
1500 yield (line, '')
1500 yield (line, '')
1501 if line != stripline:
1501 if line != stripline:
1502 yield (line[len(stripline):], 'diff.trailingwhitespace')
1502 yield (line[len(stripline):], 'diff.trailingwhitespace')
1503
1503
1504 def diffui(*args, **kw):
1504 def diffui(*args, **kw):
1505 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1505 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1506 return difflabel(diff, *args, **kw)
1506 return difflabel(diff, *args, **kw)
1507
1507
1508
1508
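difflabel() classifies each line of any diff-producing function by its prefix so the output can be colorized, and diffui() is the wrapper for diff() above. A hedged consumer sketch, assuming ui.write() accepts a label keyword as the docstring implies:

    # 'repo' and 'ui' are assumed objects
    for output, label in diffui(repo):
        ui.write(output, label=label)
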
1509 def _addmodehdr(header, omode, nmode):
1509 def _addmodehdr(header, omode, nmode):
1510 if omode != nmode:
1510 if omode != nmode:
1511 header.append('old mode %s\n' % omode)
1511 header.append('old mode %s\n' % omode)
1512 header.append('new mode %s\n' % nmode)
1512 header.append('new mode %s\n' % nmode)
1513
1513
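_addmodehdr() only emits the two extended-header lines when the file mode actually changed; the mode strings come from the gitmode table in trydiff() below. For example:

    header = []
    _addmodehdr(header, '100644', '100755')
    # header == ['old mode 100644\n', 'new mode 100755\n']
    _addmodehdr(header, '100644', '100644')   # unchanged mode: nothing appended
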
1514 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1514 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1515 copy, getfilectx, opts, losedatafn):
1515 copy, getfilectx, opts, losedatafn):
1516
1516
1517 date1 = util.datestr(ctx1.date())
1517 date1 = util.datestr(ctx1.date())
1518 man1 = ctx1.manifest()
1518 man1 = ctx1.manifest()
1519
1519
1520 gone = set()
1520 gone = set()
1521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1522
1522
1523 copyto = dict([(v, k) for k, v in copy.items()])
1523 copyto = dict([(v, k) for k, v in copy.items()])
1524
1524
1525 if opts.git:
1525 if opts.git:
1526 revs = None
1526 revs = None
1527
1527
1528 for f in sorted(modified + added + removed):
1528 for f in sorted(modified + added + removed):
1529 to = None
1529 to = None
1530 tn = None
1530 tn = None
1531 dodiff = True
1531 dodiff = True
1532 header = []
1532 header = []
1533 if f in man1:
1533 if f in man1:
1534 to = getfilectx(f, ctx1).data()
1534 to = getfilectx(f, ctx1).data()
1535 if f not in removed:
1535 if f not in removed:
1536 tn = getfilectx(f, ctx2).data()
1536 tn = getfilectx(f, ctx2).data()
1537 a, b = f, f
1537 a, b = f, f
1538 if opts.git or losedatafn:
1538 if opts.git or losedatafn:
1539 if f in added:
1539 if f in added:
1540 mode = gitmode[ctx2.flags(f)]
1540 mode = gitmode[ctx2.flags(f)]
1541 if f in copy or f in copyto:
1541 if f in copy or f in copyto:
1542 if opts.git:
1542 if opts.git:
1543 if f in copy:
1543 if f in copy:
1544 a = copy[f]
1544 a = copy[f]
1545 else:
1545 else:
1546 a = copyto[f]
1546 a = copyto[f]
1547 omode = gitmode[man1.flags(a)]
1547 omode = gitmode[man1.flags(a)]
1548 _addmodehdr(header, omode, mode)
1548 _addmodehdr(header, omode, mode)
1549 if a in removed and a not in gone:
1549 if a in removed and a not in gone:
1550 op = 'rename'
1550 op = 'rename'
1551 gone.add(a)
1551 gone.add(a)
1552 else:
1552 else:
1553 op = 'copy'
1553 op = 'copy'
1554 header.append('%s from %s\n' % (op, a))
1554 header.append('%s from %s\n' % (op, a))
1555 header.append('%s to %s\n' % (op, f))
1555 header.append('%s to %s\n' % (op, f))
1556 to = getfilectx(a, ctx1).data()
1556 to = getfilectx(a, ctx1).data()
1557 else:
1557 else:
1558 losedatafn(f)
1558 losedatafn(f)
1559 else:
1559 else:
1560 if opts.git:
1560 if opts.git:
1561 header.append('new file mode %s\n' % mode)
1561 header.append('new file mode %s\n' % mode)
1562 elif ctx2.flags(f):
1562 elif ctx2.flags(f):
1563 losedatafn(f)
1563 losedatafn(f)
1564 if util.binary(tn):
1564 if util.binary(tn):
1565 if opts.git:
1565 if opts.git:
1566 dodiff = 'binary'
1566 dodiff = 'binary'
1567 else:
1567 else:
1568 losedatafn(f)
1568 losedatafn(f)
1569 if not opts.git and not tn:
1569 if not opts.git and not tn:
1570 # regular diffs cannot represent new empty file
1570 # regular diffs cannot represent new empty file
1571 losedatafn(f)
1571 losedatafn(f)
1572 elif f in removed:
1572 elif f in removed:
1573 if opts.git:
1573 if opts.git:
1574 # have we already reported a copy above?
1574 # have we already reported a copy above?
1575 if ((f in copy and copy[f] in added
1575 if ((f in copy and copy[f] in added
1576 and copyto[copy[f]] == f) or
1576 and copyto[copy[f]] == f) or
1577 (f in copyto and copyto[f] in added
1577 (f in copyto and copyto[f] in added
1578 and copy[copyto[f]] == f)):
1578 and copy[copyto[f]] == f)):
1579 dodiff = False
1579 dodiff = False
1580 else:
1580 else:
1581 header.append('deleted file mode %s\n' %
1581 header.append('deleted file mode %s\n' %
1582 gitmode[man1.flags(f)])
1582 gitmode[man1.flags(f)])
1583 elif not to:
1583 elif not to:
1584 # regular diffs cannot represent empty file deletion
1584 # regular diffs cannot represent empty file deletion
1585 losedatafn(f)
1585 losedatafn(f)
1586 else:
1586 else:
1587 oflag = man1.flags(f)
1587 oflag = man1.flags(f)
1588 nflag = ctx2.flags(f)
1588 nflag = ctx2.flags(f)
1589 binary = util.binary(to) or util.binary(tn)
1589 binary = util.binary(to) or util.binary(tn)
1590 if opts.git:
1590 if opts.git:
1591 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1591 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1592 if binary:
1592 if binary:
1593 dodiff = 'binary'
1593 dodiff = 'binary'
1594 elif binary or nflag != oflag:
1594 elif binary or nflag != oflag:
1595 losedatafn(f)
1595 losedatafn(f)
1596 if opts.git:
1596 if opts.git:
1597 header.insert(0, mdiff.diffline(revs, a, b, opts))
1597 header.insert(0, mdiff.diffline(revs, a, b, opts))
1598
1598
1599 if dodiff:
1599 if dodiff:
1600 if dodiff == 'binary':
1600 if dodiff == 'binary':
1601 text = b85diff(to, tn)
1601 text = b85diff(to, tn)
1602 else:
1602 else:
1603 text = mdiff.unidiff(to, date1,
1603 text = mdiff.unidiff(to, date1,
1604 # ctx2 date may be dynamic
1604 # ctx2 date may be dynamic
1605 tn, util.datestr(ctx2.date()),
1605 tn, util.datestr(ctx2.date()),
1606 a, b, revs, opts=opts)
1606 a, b, revs, opts=opts)
1607 if header and (text or len(header) > 1):
1607 if header and (text or len(header) > 1):
1608 yield ''.join(header)
1608 yield ''.join(header)
1609 if text:
1609 if text:
1610 yield text
1610 yield text
1611
1611
1612 def diffstatdata(lines):
1612 def diffstatdata(lines):
1613 filename, adds, removes = None, 0, 0
1613 filename, adds, removes = None, 0, 0
1614 for line in lines:
1614 for line in lines:
1615 if line.startswith('diff'):
1615 if line.startswith('diff'):
1616 if filename:
1616 if filename:
1617 isbinary = adds == 0 and removes == 0
1617 isbinary = adds == 0 and removes == 0
1618 yield (filename, adds, removes, isbinary)
1618 yield (filename, adds, removes, isbinary)
1619 # set numbers to 0 anyway when starting new file
1619 # set numbers to 0 anyway when starting new file
1620 adds, removes = 0, 0
1620 adds, removes = 0, 0
1621 if line.startswith('diff --git'):
1621 if line.startswith('diff --git'):
1622 filename = gitre.search(line).group(1)
1622 filename = gitre.search(line).group(1)
1623 else:
1623 else:
1624 # format: "diff -r ... -r ... filename"
1624 # format: "diff -r ... -r ... filename"
1625 filename = line.split(None, 5)[-1]
1625 filename = line.split(None, 5)[-1]
1626 elif line.startswith('+') and not line.startswith('+++'):
1626 elif line.startswith('+') and not line.startswith('+++'):
1627 adds += 1
1627 adds += 1
1628 elif line.startswith('-') and not line.startswith('---'):
1628 elif line.startswith('-') and not line.startswith('---'):
1629 removes += 1
1629 removes += 1
1630 if filename:
1630 if filename:
1631 isbinary = adds == 0 and removes == 0
1631 isbinary = adds == 0 and removes == 0
1632 yield (filename, adds, removes, isbinary)
1632 yield (filename, adds, removes, isbinary)
1633
1633
1634 def diffstat(lines, width=80, git=False):
1634 def diffstat(lines, width=80, git=False):
1635 output = []
1635 output = []
1636 stats = list(diffstatdata(lines))
1636 stats = list(diffstatdata(lines))
1637
1637
1638 maxtotal, maxname = 0, 0
1638 maxtotal, maxname = 0, 0
1639 totaladds, totalremoves = 0, 0
1639 totaladds, totalremoves = 0, 0
1640 hasbinary = False
1640 hasbinary = False
1641 for filename, adds, removes, isbinary in stats:
1641 for filename, adds, removes, isbinary in stats:
1642 totaladds += adds
1642 totaladds += adds
1643 totalremoves += removes
1643 totalremoves += removes
1644 maxname = max(maxname, len(filename))
1644 maxname = max(maxname, len(filename))
1645 maxtotal = max(maxtotal, adds + removes)
1645 maxtotal = max(maxtotal, adds + removes)
1646 if isbinary:
1646 if isbinary:
1647 hasbinary = True
1647 hasbinary = True
1648
1648
1649 countwidth = len(str(maxtotal))
1649 countwidth = len(str(maxtotal))
1650 if hasbinary and countwidth < 3:
1650 if hasbinary and countwidth < 3:
1651 countwidth = 3
1651 countwidth = 3
1652 graphwidth = width - countwidth - maxname - 6
1652 graphwidth = width - countwidth - maxname - 6
1653 if graphwidth < 10:
1653 if graphwidth < 10:
1654 graphwidth = 10
1654 graphwidth = 10
1655
1655
1656 def scale(i):
1656 def scale(i):
1657 if maxtotal <= graphwidth:
1657 if maxtotal <= graphwidth:
1658 return i
1658 return i
1659 # If diffstat runs out of room it doesn't print anything,
1659 # If diffstat runs out of room it doesn't print anything,
1660 # which isn't very useful, so always print at least one + or -
1660 # which isn't very useful, so always print at least one + or -
1661 # if there were at least some changes.
1661 # if there were at least some changes.
1662 return max(i * graphwidth // maxtotal, int(bool(i)))
1662 return max(i * graphwidth // maxtotal, int(bool(i)))
1663
1663
1664 for filename, adds, removes, isbinary in stats:
1664 for filename, adds, removes, isbinary in stats:
1665 if git and isbinary:
1665 if git and isbinary:
1666 count = 'Bin'
1666 count = 'Bin'
1667 else:
1667 else:
1668 count = adds + removes
1668 count = adds + removes
1669 pluses = '+' * scale(adds)
1669 pluses = '+' * scale(adds)
1670 minuses = '-' * scale(removes)
1670 minuses = '-' * scale(removes)
1671 output.append(' %-*s | %*s %s%s\n' % (maxname, filename, countwidth,
1671 output.append(' %-*s | %*s %s%s\n' % (maxname, filename, countwidth,
1672 count, pluses, minuses))
1672 count, pluses, minuses))
1673
1673
1674 if stats:
1674 if stats:
1675 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1675 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1676 % (len(stats), totaladds, totalremoves))
1676 % (len(stats), totaladds, totalremoves))
1677
1677
1678 return ''.join(output)
1678 return ''.join(output)
1679
1679
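scale() above shrinks the +/- histogram so the largest row fits into graphwidth columns while never rounding a non-zero count down to nothing. A standalone restatement of the formula with a couple of worked values:

    def scale(i, graphwidth, maxtotal):
        if maxtotal <= graphwidth:
            return i
        return max(i * graphwidth // maxtotal, int(bool(i)))

    # graphwidth=10: a file with 3 changes out of a 200-line maximum still
    # gets one '+' instead of disappearing from the graph
    assert scale(3, 10, 200) == 1
    assert scale(120, 10, 200) == 6
    assert scale(0, 10, 200) == 0
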
1680 def diffstatui(*args, **kw):
1680 def diffstatui(*args, **kw):
1681 '''like diffstat(), but yields 2-tuples of (output, label) for
1681 '''like diffstat(), but yields 2-tuples of (output, label) for
1682 ui.write()
1682 ui.write()
1683 '''
1683 '''
1684
1684
1685 for line in diffstat(*args, **kw).splitlines():
1685 for line in diffstat(*args, **kw).splitlines():
1686 if line and line[-1] in '+-':
1686 if line and line[-1] in '+-':
1687 name, graph = line.rsplit(' ', 1)
1687 name, graph = line.rsplit(' ', 1)
1688 yield (name + ' ', '')
1688 yield (name + ' ', '')
1689 m = re.search(r'\++', graph)
1689 m = re.search(r'\++', graph)
1690 if m:
1690 if m:
1691 yield (m.group(0), 'diffstat.inserted')
1691 yield (m.group(0), 'diffstat.inserted')
1692 m = re.search(r'-+', graph)
1692 m = re.search(r'-+', graph)
1693 if m:
1693 if m:
1694 yield (m.group(0), 'diffstat.deleted')
1694 yield (m.group(0), 'diffstat.deleted')
1695 else:
1695 else:
1696 yield (line, '')
1696 yield (line, '')
1697 yield ('\n', '')
1697 yield ('\n', '')
@@ -1,70 +1,69 b''
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util, error
8 import util, error
9 from i18n import _
10
9
11 from mercurial import store
10 from mercurial import store
12
11
13 class StreamException(Exception):
12 class StreamException(Exception):
14 def __init__(self, code):
13 def __init__(self, code):
15 Exception.__init__(self)
14 Exception.__init__(self)
16 self.code = code
15 self.code = code
17 def __str__(self):
16 def __str__(self):
18 return '%i\n' % self.code
17 return '%i\n' % self.code
19
18
20 # if server supports streaming clone, it advertises "stream"
19 # if server supports streaming clone, it advertises "stream"
21 # capability with value that is version+flags of repo it is serving.
20 # capability with value that is version+flags of repo it is serving.
22 # client only streams if it can read that repo format.
21 # client only streams if it can read that repo format.
23
22
24 # stream file format is simple.
23 # stream file format is simple.
25 #
24 #
26 # server writes out line that says how many files, how many total
25 # server writes out line that says how many files, how many total
27 # bytes. separator is ascii space, byte counts are strings.
26 # bytes. separator is ascii space, byte counts are strings.
28 #
27 #
29 # then for each file:
28 # then for each file:
30 #
29 #
31 # server writes out line that says filename, how many bytes in
30 # server writes out line that says filename, how many bytes in
32 # file. separator is ascii nul, byte count is string.
31 # file. separator is ascii nul, byte count is string.
33 #
32 #
34 # server writes out raw file data.
33 # server writes out raw file data.
35
34
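The comments above describe the wire format completely: the server first sends a status line ('0\n' on success, see stream_out() below, or an error code from StreamException), then "<file count> <total bytes>\n", and then for each file "<encoded name>\0<size>\n" followed by exactly <size> raw bytes. A minimal, hedged client-side reader; real clients go through the wire-protocol layer and decode the names, this sketch reads each file fully into memory:

    def readstream(fp):
        """Parse a stream-clone body from a file-like object.

        Yields (name, data) pairs.  Illustrative sketch only.
        """
        status = fp.readline().strip()
        if status != '0':
            raise StreamException(int(status))
        filecount, totalbytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(filecount):
            name, size = fp.readline().rstrip('\n').split('\0', 1)
            yield name, fp.read(int(size))
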
36 def allowed(ui):
35 def allowed(ui):
37 return ui.configbool('server', 'uncompressed', True, untrusted=True)
36 return ui.configbool('server', 'uncompressed', True, untrusted=True)
38
37
39 def stream_out(repo):
38 def stream_out(repo):
40 '''stream out all metadata files in repository.
39 '''stream out all metadata files in repository.
41 writes to file-like object, must support write() and optional flush().'''
40 writes to file-like object, must support write() and optional flush().'''
42
41
43 if not allowed(repo.ui):
42 if not allowed(repo.ui):
44 raise StreamException(1)
43 raise StreamException(1)
45
44
46 entries = []
45 entries = []
47 total_bytes = 0
46 total_bytes = 0
48 try:
47 try:
49 # get consistent snapshot of repo, lock during scan
48 # get consistent snapshot of repo, lock during scan
50 lock = repo.lock()
49 lock = repo.lock()
51 try:
50 try:
52 repo.ui.debug('scanning\n')
51 repo.ui.debug('scanning\n')
53 for name, ename, size in repo.store.walk():
52 for name, ename, size in repo.store.walk():
54 entries.append((name, size))
53 entries.append((name, size))
55 total_bytes += size
54 total_bytes += size
56 finally:
55 finally:
57 lock.release()
56 lock.release()
58 except error.LockError:
57 except error.LockError:
59 raise StreamException(2)
58 raise StreamException(2)
60
59
61 yield '0\n'
60 yield '0\n'
62 repo.ui.debug('%d files, %d bytes to transfer\n' %
61 repo.ui.debug('%d files, %d bytes to transfer\n' %
63 (len(entries), total_bytes))
62 (len(entries), total_bytes))
64 yield '%d %d\n' % (len(entries), total_bytes)
63 yield '%d %d\n' % (len(entries), total_bytes)
65 for name, size in entries:
64 for name, size in entries:
66 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
65 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
67 # partially encode name over the wire for backwards compat
66 # partially encode name over the wire for backwards compat
68 yield '%s\0%d\n' % (store.encodedir(name), size)
67 yield '%s\0%d\n' % (store.encodedir(name), size)
69 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
68 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
70 yield chunk
69 yield chunk
@@ -1,314 +1,313 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # This is the mercurial setup script.
3 # This is the mercurial setup script.
4 #
4 #
5 # 'python setup.py install', or
5 # 'python setup.py install', or
6 # 'python setup.py --help' for more options
6 # 'python setup.py --help' for more options
7
7
8 import sys
8 import sys
9 if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'):
9 if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'):
10 raise SystemExit("Mercurial requires Python 2.4 or later.")
10 raise SystemExit("Mercurial requires Python 2.4 or later.")
11
11
12 # Solaris Python packaging brain damage
12 # Solaris Python packaging brain damage
13 try:
13 try:
14 import hashlib
14 import hashlib
15 sha = hashlib.sha1()
15 sha = hashlib.sha1()
16 except:
16 except:
17 try:
17 try:
18 import sha
18 import sha
19 except:
19 except:
20 raise SystemExit(
20 raise SystemExit(
21 "Couldn't import standard hashlib (incomplete Python install).")
21 "Couldn't import standard hashlib (incomplete Python install).")
22
22
23 try:
23 try:
24 import zlib
24 import zlib
25 except:
25 except:
26 raise SystemExit(
26 raise SystemExit(
27 "Couldn't import standard zlib (incomplete Python install).")
27 "Couldn't import standard zlib (incomplete Python install).")
28
28
29 try:
29 try:
30 import bz2
30 import bz2
31 except:
31 except:
32 raise SystemExit(
32 raise SystemExit(
33 "Couldn't import standard bz2 (incomplete Python install).")
33 "Couldn't import standard bz2 (incomplete Python install).")
34
34
35 import os, subprocess, time
35 import os, subprocess, time
36 import shutil
36 import shutil
37 import tempfile
37 import tempfile
38 from distutils.core import setup, Extension
38 from distutils.core import setup, Extension
39 from distutils.dist import Distribution
39 from distutils.dist import Distribution
40 from distutils.command.install_data import install_data
41 from distutils.command.build import build
40 from distutils.command.build import build
42 from distutils.command.build_py import build_py
41 from distutils.command.build_py import build_py
43 from distutils.spawn import spawn, find_executable
42 from distutils.spawn import spawn, find_executable
44 from distutils.ccompiler import new_compiler
43 from distutils.ccompiler import new_compiler
45
44
46 scripts = ['hg']
45 scripts = ['hg']
47 if os.name == 'nt':
46 if os.name == 'nt':
48 scripts.append('contrib/win32/hg.bat')
47 scripts.append('contrib/win32/hg.bat')
49
48
50 # simplified version of distutils.ccompiler.CCompiler.has_function
49 # simplified version of distutils.ccompiler.CCompiler.has_function
51 # that actually removes its temporary files.
50 # that actually removes its temporary files.
52 def hasfunction(cc, funcname):
51 def hasfunction(cc, funcname):
53 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
52 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
54 devnull = oldstderr = None
53 devnull = oldstderr = None
55 try:
54 try:
56 try:
55 try:
57 fname = os.path.join(tmpdir, 'funcname.c')
56 fname = os.path.join(tmpdir, 'funcname.c')
58 f = open(fname, 'w')
57 f = open(fname, 'w')
59 f.write('int main(void) {\n')
58 f.write('int main(void) {\n')
60 f.write(' %s();\n' % funcname)
59 f.write(' %s();\n' % funcname)
61 f.write('}\n')
60 f.write('}\n')
62 f.close()
61 f.close()
63 # Redirect stderr to /dev/null to hide any error messages
62 # Redirect stderr to /dev/null to hide any error messages
64 # from the compiler.
63 # from the compiler.
65 # This will have to be changed if we ever have to check
64 # This will have to be changed if we ever have to check
66 # for a function on Windows.
65 # for a function on Windows.
67 devnull = open('/dev/null', 'w')
66 devnull = open('/dev/null', 'w')
68 oldstderr = os.dup(sys.stderr.fileno())
67 oldstderr = os.dup(sys.stderr.fileno())
69 os.dup2(devnull.fileno(), sys.stderr.fileno())
68 os.dup2(devnull.fileno(), sys.stderr.fileno())
70 objects = cc.compile([fname], output_dir=tmpdir)
69 objects = cc.compile([fname], output_dir=tmpdir)
71 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
70 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
72 except:
71 except:
73 return False
72 return False
74 return True
73 return True
75 finally:
74 finally:
76 if oldstderr is not None:
75 if oldstderr is not None:
77 os.dup2(oldstderr, sys.stderr.fileno())
76 os.dup2(oldstderr, sys.stderr.fileno())
78 if devnull is not None:
77 if devnull is not None:
79 devnull.close()
78 devnull.close()
80 shutil.rmtree(tmpdir)
79 shutil.rmtree(tmpdir)
81
80
82 # py2exe needs to be installed to work
81 # py2exe needs to be installed to work
83 try:
82 try:
84 import py2exe
83 import py2exe
85 py2exeloaded = True
84 py2exeloaded = True
86
85
87 # Help py2exe to find win32com.shell
86 # Help py2exe to find win32com.shell
88 try:
87 try:
89 import modulefinder
88 import modulefinder
90 import win32com
89 import win32com
91 for p in win32com.__path__[1:]: # Take the path to win32comext
90 for p in win32com.__path__[1:]: # Take the path to win32comext
92 modulefinder.AddPackagePath("win32com", p)
91 modulefinder.AddPackagePath("win32com", p)
93 pn = "win32com.shell"
92 pn = "win32com.shell"
94 __import__(pn)
93 __import__(pn)
95 m = sys.modules[pn]
94 m = sys.modules[pn]
96 for p in m.__path__[1:]:
95 for p in m.__path__[1:]:
97 modulefinder.AddPackagePath(pn, p)
96 modulefinder.AddPackagePath(pn, p)
98 except ImportError:
97 except ImportError:
99 pass
98 pass
100
99
101 except ImportError:
100 except ImportError:
102 py2exeloaded = False
101 py2exeloaded = False
103 pass
102 pass
104
103
105 def runcmd(cmd, env):
104 def runcmd(cmd, env):
106 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
105 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
107 stderr=subprocess.PIPE, env=env)
106 stderr=subprocess.PIPE, env=env)
108 out, err = p.communicate()
107 out, err = p.communicate()
109 # If root is executing setup.py, but the repository is owned by
108 # If root is executing setup.py, but the repository is owned by
110 # another user (as in "sudo python setup.py install") we will get
109 # another user (as in "sudo python setup.py install") we will get
111 # trust warnings since the .hg/hgrc file is untrusted. That is
110 # trust warnings since the .hg/hgrc file is untrusted. That is
112 # fine, we don't want to load it anyway. Python may warn about
111 # fine, we don't want to load it anyway. Python may warn about
113 # a missing __init__.py in mercurial/locale, we also ignore that.
112 # a missing __init__.py in mercurial/locale, we also ignore that.
114 err = [e for e in err.splitlines()
113 err = [e for e in err.splitlines()
115 if not e.startswith('Not trusting file') \
114 if not e.startswith('Not trusting file') \
116 and not e.startswith('warning: Not importing')]
115 and not e.startswith('warning: Not importing')]
117 if err:
116 if err:
118 return ''
117 return ''
119 return out
118 return out
120
119
121 version = ''
120 version = ''
122
121
123 if os.path.isdir('.hg'):
122 if os.path.isdir('.hg'):
124 # Execute hg out of this directory with a custom environment which
123 # Execute hg out of this directory with a custom environment which
125 # includes the pure Python modules in mercurial/pure. We also take
124 # includes the pure Python modules in mercurial/pure. We also take
126 # care to not use any hgrc files and do no localization.
125 # care to not use any hgrc files and do no localization.
127 pypath = ['mercurial', os.path.join('mercurial', 'pure')]
126 pypath = ['mercurial', os.path.join('mercurial', 'pure')]
128 env = {'PYTHONPATH': os.pathsep.join(pypath),
127 env = {'PYTHONPATH': os.pathsep.join(pypath),
129 'HGRCPATH': '',
128 'HGRCPATH': '',
130 'LANGUAGE': 'C'}
129 'LANGUAGE': 'C'}
131 if 'LD_LIBRARY_PATH' in os.environ:
130 if 'LD_LIBRARY_PATH' in os.environ:
132 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
131 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
133 if 'SystemRoot' in os.environ:
132 if 'SystemRoot' in os.environ:
134 # Copy SystemRoot into the custom environment for Python 2.6
133 # Copy SystemRoot into the custom environment for Python 2.6
135 # under Windows. Otherwise, the subprocess will fail with
134 # under Windows. Otherwise, the subprocess will fail with
136 # error 0xc0150004. See: http://bugs.python.org/issue3440
135 # error 0xc0150004. See: http://bugs.python.org/issue3440
137 env['SystemRoot'] = os.environ['SystemRoot']
136 env['SystemRoot'] = os.environ['SystemRoot']
138 cmd = [sys.executable, 'hg', 'id', '-i', '-t']
137 cmd = [sys.executable, 'hg', 'id', '-i', '-t']
139 l = runcmd(cmd, env).split()
138 l = runcmd(cmd, env).split()
140 while len(l) > 1 and l[-1][0].isalpha(): # remove non-numbered tags
139 while len(l) > 1 and l[-1][0].isalpha(): # remove non-numbered tags
141 l.pop()
140 l.pop()
142 if len(l) > 1: # tag found
141 if len(l) > 1: # tag found
143 version = l[-1]
142 version = l[-1]
144 if l[0].endswith('+'): # propagate the dirty status to the tag
143 if l[0].endswith('+'): # propagate the dirty status to the tag
145 version += '+'
144 version += '+'
146 elif len(l) == 1: # no tag found
145 elif len(l) == 1: # no tag found
147 cmd = [sys.executable, 'hg', 'parents', '--template',
146 cmd = [sys.executable, 'hg', 'parents', '--template',
148 '{latesttag}+{latesttagdistance}-']
147 '{latesttag}+{latesttagdistance}-']
149 version = runcmd(cmd, env) + l[0]
148 version = runcmd(cmd, env) + l[0]
150 if version.endswith('+'):
149 if version.endswith('+'):
151 version += time.strftime('%Y%m%d')
150 version += time.strftime('%Y%m%d')
152 elif os.path.exists('.hg_archival.txt'):
151 elif os.path.exists('.hg_archival.txt'):
153 kw = dict([[t.strip() for t in l.split(':', 1)]
152 kw = dict([[t.strip() for t in l.split(':', 1)]
154 for l in open('.hg_archival.txt')])
153 for l in open('.hg_archival.txt')])
155 if 'tag' in kw:
154 if 'tag' in kw:
156 version = kw['tag']
155 version = kw['tag']
157 elif 'latesttag' in kw:
156 elif 'latesttag' in kw:
158 version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
157 version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
159 else:
158 else:
160 version = kw.get('node', '')[:12]
159 version = kw.get('node', '')[:12]
161
160
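The .hg_archival.txt branch covers building from an 'hg archive' tarball instead of a working copy: the file is a list of "key: value" lines, and the keys consulted here are tag, latesttag, latesttagdistance and node. A hedged example of such a file for an untagged snapshot (the hash and numbers are made up); a tagged release archive would carry a "tag:" line instead, which takes precedence:

    node: 0123456789abcdef0123456789abcdef01234567
    latesttag: 1.5
    latesttagdistance: 47
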
162 if version:
161 if version:
163 f = open("mercurial/__version__.py", "w")
162 f = open("mercurial/__version__.py", "w")
164 f.write('# this file is autogenerated by setup.py\n')
163 f.write('# this file is autogenerated by setup.py\n')
165 f.write('version = "%s"\n' % version)
164 f.write('version = "%s"\n' % version)
166 f.close()
165 f.close()
167
166
168
167
169 try:
168 try:
170 from mercurial import __version__
169 from mercurial import __version__
171 version = __version__.version
170 version = __version__.version
172 except ImportError:
171 except ImportError:
173 version = 'unknown'
172 version = 'unknown'
174
173
175 class hgbuildmo(build):
174 class hgbuildmo(build):
176
175
177 description = "build translations (.mo files)"
176 description = "build translations (.mo files)"
178
177
179 def run(self):
178 def run(self):
180 if not find_executable('msgfmt'):
179 if not find_executable('msgfmt'):
181 self.warn("could not find msgfmt executable, no translations "
180 self.warn("could not find msgfmt executable, no translations "
182 "will be built")
181 "will be built")
183 return
182 return
184
183
185 podir = 'i18n'
184 podir = 'i18n'
186 if not os.path.isdir(podir):
185 if not os.path.isdir(podir):
187 self.warn("could not find %s/ directory" % podir)
186 self.warn("could not find %s/ directory" % podir)
188 return
187 return
189
188
190 join = os.path.join
189 join = os.path.join
191 for po in os.listdir(podir):
190 for po in os.listdir(podir):
192 if not po.endswith('.po'):
191 if not po.endswith('.po'):
193 continue
192 continue
194 pofile = join(podir, po)
193 pofile = join(podir, po)
195 modir = join('locale', po[:-3], 'LC_MESSAGES')
194 modir = join('locale', po[:-3], 'LC_MESSAGES')
196 mofile = join(modir, 'hg.mo')
195 mofile = join(modir, 'hg.mo')
197 mobuildfile = join('mercurial', mofile)
196 mobuildfile = join('mercurial', mofile)
198 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
197 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
199 if sys.platform != 'sunos5':
198 if sys.platform != 'sunos5':
200 # msgfmt on Solaris does not know about -c
199 # msgfmt on Solaris does not know about -c
201 cmd.append('-c')
200 cmd.append('-c')
202 self.mkpath(join('mercurial', modir))
201 self.mkpath(join('mercurial', modir))
203 self.make_file([pofile], mobuildfile, spawn, (cmd,))
202 self.make_file([pofile], mobuildfile, spawn, (cmd,))
204
203
205 # Insert hgbuildmo first so that files in mercurial/locale/ are found
204 # Insert hgbuildmo first so that files in mercurial/locale/ are found
206 # when build_py is run next.
205 # when build_py is run next.
207 build.sub_commands.insert(0, ('build_mo', None))
206 build.sub_commands.insert(0, ('build_mo', None))
208
207
209 Distribution.pure = 0
208 Distribution.pure = 0
210 Distribution.global_options.append(('pure', None, "use pure (slow) Python "
209 Distribution.global_options.append(('pure', None, "use pure (slow) Python "
211 "code instead of C extensions"))
210 "code instead of C extensions"))
212
211
213 class hgbuildpy(build_py):
212 class hgbuildpy(build_py):
214
213
215 def finalize_options(self):
214 def finalize_options(self):
216 build_py.finalize_options(self)
215 build_py.finalize_options(self)
217
216
218 if self.distribution.pure:
217 if self.distribution.pure:
219 if self.py_modules is None:
218 if self.py_modules is None:
220 self.py_modules = []
219 self.py_modules = []
221 for ext in self.distribution.ext_modules:
220 for ext in self.distribution.ext_modules:
222 if ext.name.startswith("mercurial."):
221 if ext.name.startswith("mercurial."):
223 self.py_modules.append("mercurial.pure.%s" % ext.name[10:])
222 self.py_modules.append("mercurial.pure.%s" % ext.name[10:])
224 self.distribution.ext_modules = []
223 self.distribution.ext_modules = []
225
224
226 def find_modules(self):
225 def find_modules(self):
227 modules = build_py.find_modules(self)
226 modules = build_py.find_modules(self)
228 for module in modules:
227 for module in modules:
229 if module[0] == "mercurial.pure":
228 if module[0] == "mercurial.pure":
230 if module[1] != "__init__":
229 if module[1] != "__init__":
231 yield ("mercurial", module[1], module[2])
230 yield ("mercurial", module[1], module[2])
232 else:
231 else:
233 yield module
232 yield module
234
233
235 cmdclass = {'build_mo': hgbuildmo,
234 cmdclass = {'build_mo': hgbuildmo,
236 'build_py': hgbuildpy}
235 'build_py': hgbuildpy}
237
236
238 packages = ['mercurial', 'mercurial.hgweb', 'hgext', 'hgext.convert',
237 packages = ['mercurial', 'mercurial.hgweb', 'hgext', 'hgext.convert',
239 'hgext.highlight', 'hgext.zeroconf']
238 'hgext.highlight', 'hgext.zeroconf']
240
239
241 pymodules = []
240 pymodules = []
242
241
243 extmodules = [
242 extmodules = [
244 Extension('mercurial.base85', ['mercurial/base85.c']),
243 Extension('mercurial.base85', ['mercurial/base85.c']),
245 Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
244 Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
246 Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
245 Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
247 Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
246 Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
248 Extension('mercurial.parsers', ['mercurial/parsers.c']),
247 Extension('mercurial.parsers', ['mercurial/parsers.c']),
249 ]
248 ]
250
249
251 # disable osutil.c under windows + python 2.4 (issue1364)
250 # disable osutil.c under windows + python 2.4 (issue1364)
252 if sys.platform == 'win32' and sys.version_info < (2, 5, 0, 'final'):
251 if sys.platform == 'win32' and sys.version_info < (2, 5, 0, 'final'):
253 pymodules.append('mercurial.pure.osutil')
252 pymodules.append('mercurial.pure.osutil')
254 else:
253 else:
255 extmodules.append(Extension('mercurial.osutil', ['mercurial/osutil.c']))
254 extmodules.append(Extension('mercurial.osutil', ['mercurial/osutil.c']))
256
255
257 if sys.platform == 'linux2' and os.uname()[2] > '2.6':
256 if sys.platform == 'linux2' and os.uname()[2] > '2.6':
258 # The inotify extension is only usable with Linux 2.6 kernels.
257 # The inotify extension is only usable with Linux 2.6 kernels.
259 # You also need a reasonably recent C library.
258 # You also need a reasonably recent C library.
260 cc = new_compiler()
259 cc = new_compiler()
261 if hasfunction(cc, 'inotify_add_watch'):
260 if hasfunction(cc, 'inotify_add_watch'):
262 extmodules.append(Extension('hgext.inotify.linux._inotify',
261 extmodules.append(Extension('hgext.inotify.linux._inotify',
263 ['hgext/inotify/linux/_inotify.c']))
262 ['hgext/inotify/linux/_inotify.c']))
264 packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
263 packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
265
264
266 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
265 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
267 'help/*.txt']}
266 'help/*.txt']}
268
267
269 def ordinarypath(p):
268 def ordinarypath(p):
270 return p and p[0] != '.' and p[-1] != '~'
269 return p and p[0] != '.' and p[-1] != '~'
271
270
272 for root in ('templates',):
271 for root in ('templates',):
273 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
272 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
274 curdir = curdir.split(os.sep, 1)[1]
273 curdir = curdir.split(os.sep, 1)[1]
275 dirs[:] = filter(ordinarypath, dirs)
274 dirs[:] = filter(ordinarypath, dirs)
276 for f in filter(ordinarypath, files):
275 for f in filter(ordinarypath, files):
277 f = os.path.join(curdir, f)
276 f = os.path.join(curdir, f)
278 packagedata['mercurial'].append(f)
277 packagedata['mercurial'].append(f)
279
278
280 datafiles = []
279 datafiles = []
281 setupversion = version
280 setupversion = version
282 extra = {}
281 extra = {}
283
282
284 if py2exeloaded:
283 if py2exeloaded:
285 extra['console'] = [
284 extra['console'] = [
286 {'script':'hg',
285 {'script':'hg',
287 'copyright':'Copyright (C) 2005-2010 Matt Mackall and others',
286 'copyright':'Copyright (C) 2005-2010 Matt Mackall and others',
288 'product_version':version}]
287 'product_version':version}]
289
288
290 if os.name == 'nt':
289 if os.name == 'nt':
291 # Windows binary file versions for exe/dll files must have the
290 # Windows binary file versions for exe/dll files must have the
292 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
291 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
293 setupversion = version.split('+', 1)[0]
292 setupversion = version.split('+', 1)[0]
294
293
295 setup(name='mercurial',
294 setup(name='mercurial',
296 version=setupversion,
295 version=setupversion,
297 author='Matt Mackall',
296 author='Matt Mackall',
298 author_email='mpm@selenic.com',
297 author_email='mpm@selenic.com',
299 url='http://mercurial.selenic.com/',
298 url='http://mercurial.selenic.com/',
300 description='Scalable distributed SCM',
299 description='Scalable distributed SCM',
301 license='GNU GPLv2+',
300 license='GNU GPLv2+',
302 scripts=scripts,
301 scripts=scripts,
303 packages=packages,
302 packages=packages,
304 py_modules=pymodules,
303 py_modules=pymodules,
305 ext_modules=extmodules,
304 ext_modules=extmodules,
306 data_files=datafiles,
305 data_files=datafiles,
307 package_data=packagedata,
306 package_data=packagedata,
308 cmdclass=cmdclass,
307 cmdclass=cmdclass,
309 options=dict(py2exe=dict(packages=['hgext', 'email']),
308 options=dict(py2exe=dict(packages=['hgext', 'email']),
310 bdist_mpkg=dict(zipdist=True,
309 bdist_mpkg=dict(zipdist=True,
311 license='COPYING',
310 license='COPYING',
312 readme='contrib/macosx/Readme.html',
311 readme='contrib/macosx/Readme.html',
313 welcome='contrib/macosx/Welcome.html')),
312 welcome='contrib/macosx/Welcome.html')),
314 **extra)
313 **extra)
@@ -1,29 +1,29 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 """This does HTTP GET requests given a host:port and path and returns
3 """This does HTTP GET requests given a host:port and path and returns
4 a subset of the headers plus the body of the result."""
4 a subset of the headers plus the body of the result."""
5
5
6 import httplib, sys, re
6 import httplib, sys
7
7
8 try:
8 try:
9 import msvcrt, os
9 import msvcrt, os
10 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
10 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
11 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
11 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
12 except ImportError:
12 except ImportError:
13 pass
13 pass
14
14
15 headers = [h.lower() for h in sys.argv[3:]]
15 headers = [h.lower() for h in sys.argv[3:]]
16 conn = httplib.HTTPConnection(sys.argv[1])
16 conn = httplib.HTTPConnection(sys.argv[1])
17 conn.request("GET", sys.argv[2])
17 conn.request("GET", sys.argv[2])
18 response = conn.getresponse()
18 response = conn.getresponse()
19 print response.status, response.reason
19 print response.status, response.reason
20 for h in headers:
20 for h in headers:
21 if response.getheader(h, None) is not None:
21 if response.getheader(h, None) is not None:
22 print "%s: %s" % (h, response.getheader(h))
22 print "%s: %s" % (h, response.getheader(h))
23 print
23 print
24 data = response.read()
24 data = response.read()
25 sys.stdout.write(data)
25 sys.stdout.write(data)
26
26
27 if 200 <= response.status <= 299:
27 if 200 <= response.status <= 299:
28 sys.exit(0)
28 sys.exit(0)
29 sys.exit(1)
29 sys.exit(1)
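The arguments are positional: argv[1] is host:port, argv[2] is the path to GET, and any remaining arguments name headers to print (matched case-insensitively); the exit status is 0 only for a 2xx response. A hedged invocation, using the test suite's default port from run-tests.py below and a made-up path:

    python get-with-headers.py localhost:20059 '/raw-file/tip/setup.py' content-type content-length
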
@@ -1,25 +1,25 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 import os, sys, time, errno, signal
3 import os, time, errno, signal
4
4
5 # Kill off any leftover daemon processes
5 # Kill off any leftover daemon processes
6 try:
6 try:
7 fp = open(os.environ['DAEMON_PIDS'])
7 fp = open(os.environ['DAEMON_PIDS'])
8 for line in fp:
8 for line in fp:
9 try:
9 try:
10 pid = int(line)
10 pid = int(line)
11 except ValueError:
11 except ValueError:
12 continue
12 continue
13 try:
13 try:
14 os.kill(pid, 0)
14 os.kill(pid, 0)
15 os.kill(pid, signal.SIGTERM)
15 os.kill(pid, signal.SIGTERM)
16 for i in range(10):
16 for i in range(10):
17 time.sleep(0.05)
17 time.sleep(0.05)
18 os.kill(pid, 0)
18 os.kill(pid, 0)
19 os.kill(pid, signal.SIGKILL)
19 os.kill(pid, signal.SIGKILL)
20 except OSError, err:
20 except OSError, err:
21 if err.errno != errno.ESRCH:
21 if err.errno != errno.ESRCH:
22 raise
22 raise
23 fp.close()
23 fp.close()
24 except IOError:
24 except IOError:
25 pass
25 pass
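The loop above relies on the usual "signal 0" probe: os.kill(pid, 0) delivers nothing but still raises ESRCH once the process is gone, so each SIGTERM is followed by up to ten short sleeps before escalating to SIGKILL. The probe on its own, as a small hedged helper:

    import os, errno

    def alive(pid):
        try:
            os.kill(pid, 0)
            return True
        except OSError, err:
            if err.errno == errno.ESRCH:
                return False
            raise
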
@@ -1,957 +1,956 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 #
38 #
39 # (You could use any subset of the tests: test-s* happens to match
39 # (You could use any subset of the tests: test-s* happens to match
40 # enough that it's worth doing parallel runs, few enough that it
40 # enough that it's worth doing parallel runs, few enough that it
41 # completes fairly quickly, includes both shell and Python scripts, and
41 # completes fairly quickly, includes both shell and Python scripts, and
42 # includes some scripts that run daemon processes.)
42 # includes some scripts that run daemon processes.)
43
43
44 from distutils import version
44 from distutils import version
45 import difflib
45 import difflib
46 import errno
46 import errno
47 import optparse
47 import optparse
48 import os
48 import os
49 import signal
49 import shutil
50 import subprocess
50 import subprocess
51 import shutil
52 import signal
51 import signal
53 import sys
52 import sys
54 import tempfile
53 import tempfile
55 import time
54 import time
56
55
57 closefds = os.name == 'posix'
56 closefds = os.name == 'posix'
58 def Popen4(cmd, bufsize=-1):
57 def Popen4(cmd, bufsize=-1):
59 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
58 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
60 close_fds=closefds,
59 close_fds=closefds,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
60 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 stderr=subprocess.STDOUT)
61 stderr=subprocess.STDOUT)
63 p.fromchild = p.stdout
62 p.fromchild = p.stdout
64 p.tochild = p.stdin
63 p.tochild = p.stdin
65 p.childerr = p.stderr
64 p.childerr = p.stderr
66 return p
65 return p
67
66
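Popen4() recreates the old popen2.Popen4 interface on top of subprocess: stdout and stderr are merged, and the fromchild/tochild/childerr aliases keep older call sites working. A hedged usage sketch, with an illustrative command:

    p = Popen4('hg version -q')
    output = p.fromchild.read()   # combined stdout and stderr
    status = p.wait()
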
68 # reserved exit code to skip test (used by hghave)
67 # reserved exit code to skip test (used by hghave)
69 SKIPPED_STATUS = 80
68 SKIPPED_STATUS = 80
70 SKIPPED_PREFIX = 'skipped: '
69 SKIPPED_PREFIX = 'skipped: '
71 FAILED_PREFIX = 'hghave check failed: '
70 FAILED_PREFIX = 'hghave check failed: '
72 PYTHON = sys.executable
71 PYTHON = sys.executable
73 IMPL_PATH = 'PYTHONPATH'
72 IMPL_PATH = 'PYTHONPATH'
74 if 'java' in sys.platform:
73 if 'java' in sys.platform:
75 IMPL_PATH = 'JYTHONPATH'
74 IMPL_PATH = 'JYTHONPATH'
76
75
77 requiredtools = ["python", "diff", "grep", "unzip", "gunzip", "bunzip2", "sed"]
76 requiredtools = ["python", "diff", "grep", "unzip", "gunzip", "bunzip2", "sed"]
78
77
79 defaults = {
78 defaults = {
80 'jobs': ('HGTEST_JOBS', 1),
79 'jobs': ('HGTEST_JOBS', 1),
81 'timeout': ('HGTEST_TIMEOUT', 180),
80 'timeout': ('HGTEST_TIMEOUT', 180),
82 'port': ('HGTEST_PORT', 20059),
81 'port': ('HGTEST_PORT', 20059),
83 }
82 }
84
83
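The defaults table maps each option to an environment variable and a fallback value, so jobs, timeout and port can be set either on the command line or via HGTEST_JOBS, HGTEST_TIMEOUT and HGTEST_PORT. A hedged shell example with illustrative values:

    $ HGTEST_JOBS=4 HGTEST_TIMEOUT=600 ./run-tests.py test-s*
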
85 def parseargs():
84 def parseargs():
86 parser = optparse.OptionParser("%prog [options] [tests]")
85 parser = optparse.OptionParser("%prog [options] [tests]")
87 parser.add_option("-C", "--annotate", action="store_true",
86 parser.add_option("-C", "--annotate", action="store_true",
88 help="output files annotated with coverage")
87 help="output files annotated with coverage")
89 parser.add_option("--child", type="int",
88 parser.add_option("--child", type="int",
90 help="run as child process, summary to given fd")
89 help="run as child process, summary to given fd")
91 parser.add_option("-c", "--cover", action="store_true",
90 parser.add_option("-c", "--cover", action="store_true",
92 help="print a test coverage report")
91 help="print a test coverage report")
93 parser.add_option("-f", "--first", action="store_true",
92 parser.add_option("-f", "--first", action="store_true",
94 help="exit on the first test failure")
93 help="exit on the first test failure")
95 parser.add_option("-i", "--interactive", action="store_true",
94 parser.add_option("-i", "--interactive", action="store_true",
96 help="prompt to accept changed output")
95 help="prompt to accept changed output")
97 parser.add_option("-j", "--jobs", type="int",
96 parser.add_option("-j", "--jobs", type="int",
98 help="number of jobs to run in parallel"
97 help="number of jobs to run in parallel"
99 " (default: $%s or %d)" % defaults['jobs'])
98 " (default: $%s or %d)" % defaults['jobs'])
100 parser.add_option("-k", "--keywords",
99 parser.add_option("-k", "--keywords",
101 help="run tests matching keywords")
100 help="run tests matching keywords")
102 parser.add_option("--keep-tmpdir", action="store_true",
101 parser.add_option("--keep-tmpdir", action="store_true",
103 help="keep temporary directory after running tests")
102 help="keep temporary directory after running tests")
104 parser.add_option("--tmpdir", type="string",
103 parser.add_option("--tmpdir", type="string",
105 help="run tests in the given temporary directory"
104 help="run tests in the given temporary directory"
106 " (implies --keep-tmpdir)")
105 " (implies --keep-tmpdir)")
107 parser.add_option("-d", "--debug", action="store_true",
106 parser.add_option("-d", "--debug", action="store_true",
108 help="debug mode: write output of test scripts to console"
107 help="debug mode: write output of test scripts to console"
109 " rather than capturing and diff'ing it (disables timeout)")
108 " rather than capturing and diff'ing it (disables timeout)")
110 parser.add_option("-R", "--restart", action="store_true",
109 parser.add_option("-R", "--restart", action="store_true",
111 help="restart at last error")
110 help="restart at last error")
112 parser.add_option("-p", "--port", type="int",
111 parser.add_option("-p", "--port", type="int",
113 help="port on which servers should listen"
112 help="port on which servers should listen"
114 " (default: $%s or %d)" % defaults['port'])
113 " (default: $%s or %d)" % defaults['port'])
115 parser.add_option("-r", "--retest", action="store_true",
114 parser.add_option("-r", "--retest", action="store_true",
116 help="retest failed tests")
115 help="retest failed tests")
117 parser.add_option("-S", "--noskips", action="store_true",
116 parser.add_option("-S", "--noskips", action="store_true",
118 help="don't report skip tests verbosely")
117 help="don't report skip tests verbosely")
119 parser.add_option("-t", "--timeout", type="int",
118 parser.add_option("-t", "--timeout", type="int",
120 help="kill errant tests after TIMEOUT seconds"
119 help="kill errant tests after TIMEOUT seconds"
121 " (default: $%s or %d)" % defaults['timeout'])
120 " (default: $%s or %d)" % defaults['timeout'])
122 parser.add_option("-v", "--verbose", action="store_true",
121 parser.add_option("-v", "--verbose", action="store_true",
123 help="output verbose messages")
122 help="output verbose messages")
124 parser.add_option("-n", "--nodiff", action="store_true",
123 parser.add_option("-n", "--nodiff", action="store_true",
125 help="skip showing test changes")
124 help="skip showing test changes")
126 parser.add_option("--with-hg", type="string",
125 parser.add_option("--with-hg", type="string",
127 metavar="HG",
126 metavar="HG",
128 help="test using specified hg script rather than a "
127 help="test using specified hg script rather than a "
129 "temporary installation")
128 "temporary installation")
130 parser.add_option("--local", action="store_true",
129 parser.add_option("--local", action="store_true",
131 help="shortcut for --with-hg=<testdir>/../hg")
130 help="shortcut for --with-hg=<testdir>/../hg")
132 parser.add_option("--pure", action="store_true",
131 parser.add_option("--pure", action="store_true",
133 help="use pure Python code instead of C extensions")
132 help="use pure Python code instead of C extensions")
134 parser.add_option("-3", "--py3k-warnings", action="store_true",
133 parser.add_option("-3", "--py3k-warnings", action="store_true",
135 help="enable Py3k warnings on Python 2.6+")
134 help="enable Py3k warnings on Python 2.6+")
136 parser.add_option("--inotify", action="store_true",
135 parser.add_option("--inotify", action="store_true",
137 help="enable inotify extension when running tests")
136 help="enable inotify extension when running tests")
138 parser.add_option("--blacklist", action="append",
137 parser.add_option("--blacklist", action="append",
139 help="skip tests listed in the specified blacklist file")
138 help="skip tests listed in the specified blacklist file")
140
139
141 for option, default in defaults.items():
140 for option, default in defaults.items():
142 defaults[option] = int(os.environ.get(*default))
141 defaults[option] = int(os.environ.get(*default))
143 parser.set_defaults(**defaults)
142 parser.set_defaults(**defaults)
144 (options, args) = parser.parse_args()
143 (options, args) = parser.parse_args()
145
144
146 # jython is always pure
145 # jython is always pure
147 if 'java' in sys.platform or '__pypy__' in sys.modules:
146 if 'java' in sys.platform or '__pypy__' in sys.modules:
148 options.pure = True
147 options.pure = True
149
148
150 if options.with_hg:
149 if options.with_hg:
151 if not (os.path.isfile(options.with_hg) and
150 if not (os.path.isfile(options.with_hg) and
152 os.access(options.with_hg, os.X_OK)):
151 os.access(options.with_hg, os.X_OK)):
153 parser.error('--with-hg must specify an executable hg script')
152 parser.error('--with-hg must specify an executable hg script')
154 if not os.path.basename(options.with_hg) == 'hg':
153 if not os.path.basename(options.with_hg) == 'hg':
155 sys.stderr.write('warning: --with-hg should specify an hg script')
154 sys.stderr.write('warning: --with-hg should specify an hg script')
156 if options.local:
155 if options.local:
157 testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
156 testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
158 hgbin = os.path.join(os.path.dirname(testdir), 'hg')
157 hgbin = os.path.join(os.path.dirname(testdir), 'hg')
159 if not os.access(hgbin, os.X_OK):
158 if not os.access(hgbin, os.X_OK):
160 parser.error('--local specified, but %r not found or not executable'
159 parser.error('--local specified, but %r not found or not executable'
161 % hgbin)
160 % hgbin)
162 options.with_hg = hgbin
161 options.with_hg = hgbin
163
162
164 options.anycoverage = options.cover or options.annotate
163 options.anycoverage = options.cover or options.annotate
165 if options.anycoverage:
164 if options.anycoverage:
166 try:
165 try:
167 import coverage
166 import coverage
168 covver = version.StrictVersion(coverage.__version__).version
167 covver = version.StrictVersion(coverage.__version__).version
169 if covver < (3, 3):
168 if covver < (3, 3):
170 parser.error('coverage options require coverage 3.3 or later')
169 parser.error('coverage options require coverage 3.3 or later')
171 except ImportError:
170 except ImportError:
172 parser.error('coverage options now require the coverage package')
171 parser.error('coverage options now require the coverage package')
173
172
174 if options.anycoverage and options.local:
173 if options.anycoverage and options.local:
175 # this needs some path mangling somewhere, I guess
174 # this needs some path mangling somewhere, I guess
176 parser.error("sorry, coverage options do not work when --local "
175 parser.error("sorry, coverage options do not work when --local "
177 "is specified")
176 "is specified")
178
177
179 global vlog
178 global vlog
180 if options.verbose:
179 if options.verbose:
181 if options.jobs > 1 or options.child is not None:
180 if options.jobs > 1 or options.child is not None:
182 pid = "[%d]" % os.getpid()
181 pid = "[%d]" % os.getpid()
183 else:
182 else:
184 pid = None
183 pid = None
185 def vlog(*msg):
184 def vlog(*msg):
186 if pid:
185 if pid:
187 print pid,
186 print pid,
188 for m in msg:
187 for m in msg:
189 print m,
188 print m,
190 print
189 print
191 sys.stdout.flush()
190 sys.stdout.flush()
192 else:
191 else:
193 vlog = lambda *msg: None
192 vlog = lambda *msg: None
194
193
195 if options.tmpdir:
194 if options.tmpdir:
196 options.tmpdir = os.path.expanduser(options.tmpdir)
195 options.tmpdir = os.path.expanduser(options.tmpdir)
197
196
198 if options.jobs < 1:
197 if options.jobs < 1:
199 parser.error('--jobs must be positive')
198 parser.error('--jobs must be positive')
200 if options.interactive and options.jobs > 1:
199 if options.interactive and options.jobs > 1:
201 print '(--interactive overrides --jobs)'
200 print '(--interactive overrides --jobs)'
202 options.jobs = 1
201 options.jobs = 1
203 if options.interactive and options.debug:
202 if options.interactive and options.debug:
204 parser.error("-i/--interactive and -d/--debug are incompatible")
203 parser.error("-i/--interactive and -d/--debug are incompatible")
205 if options.debug:
204 if options.debug:
206 if options.timeout != defaults['timeout']:
205 if options.timeout != defaults['timeout']:
207 sys.stderr.write(
206 sys.stderr.write(
208 'warning: --timeout option ignored with --debug\n')
207 'warning: --timeout option ignored with --debug\n')
209 options.timeout = 0
208 options.timeout = 0
210 if options.py3k_warnings:
209 if options.py3k_warnings:
211 if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
210 if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
212 parser.error('--py3k-warnings can only be used on Python 2.6+')
211 parser.error('--py3k-warnings can only be used on Python 2.6+')
213 if options.blacklist:
212 if options.blacklist:
214 blacklist = dict()
213 blacklist = dict()
215 for filename in options.blacklist:
214 for filename in options.blacklist:
216 try:
215 try:
217 path = os.path.expanduser(os.path.expandvars(filename))
216 path = os.path.expanduser(os.path.expandvars(filename))
218 f = open(path, "r")
217 f = open(path, "r")
219 except IOError, err:
218 except IOError, err:
220 if err.errno != errno.ENOENT:
219 if err.errno != errno.ENOENT:
221 raise
220 raise
222 print "warning: no such blacklist file: %s" % filename
221 print "warning: no such blacklist file: %s" % filename
223 continue
222 continue
224
223
225 for line in f.readlines():
224 for line in f.readlines():
226 line = line.strip()
225 line = line.strip()
227 if line and not line.startswith('#'):
226 if line and not line.startswith('#'):
228 blacklist[line] = filename
227 blacklist[line] = filename
229
228
230 options.blacklist = blacklist
229 options.blacklist = blacklist
231
230
232 return (options, args)
231 return (options, args)
233
232
234 def rename(src, dst):
233 def rename(src, dst):
235 """Like os.rename(), trade atomicity and opened files friendliness
234 """Like os.rename(), trade atomicity and opened files friendliness
236 for existing destination support.
235 for existing destination support.
237 """
236 """
238 shutil.copy(src, dst)
237 shutil.copy(src, dst)
239 os.remove(src)
238 os.remove(src)
240
239
241 def splitnewlines(text):
240 def splitnewlines(text):
242 '''like str.splitlines, but only split on newlines.
241 '''like str.splitlines, but only split on newlines.
243 keep line endings.'''
242 keep line endings.'''
244 i = 0
243 i = 0
245 lines = []
244 lines = []
246 while True:
245 while True:
247 n = text.find('\n', i)
246 n = text.find('\n', i)
248 if n == -1:
247 if n == -1:
249 last = text[i:]
248 last = text[i:]
250 if last:
249 if last:
251 lines.append(last)
250 lines.append(last)
252 return lines
251 return lines
253 lines.append(text[i:n + 1])
252 lines.append(text[i:n + 1])
254 i = n + 1
253 i = n + 1
255
254
256 def parsehghaveoutput(lines):
255 def parsehghaveoutput(lines):
257 '''Parse hghave log lines.
256 '''Parse hghave log lines.
258 Return tuple of lists (missing, failed):
257 Return tuple of lists (missing, failed):
259 * the missing/unknown features
258 * the missing/unknown features
260 * the features for which existence check failed'''
259 * the features for which existence check failed'''
261 missing = []
260 missing = []
262 failed = []
261 failed = []
263 for line in lines:
262 for line in lines:
264 if line.startswith(SKIPPED_PREFIX):
263 if line.startswith(SKIPPED_PREFIX):
265 line = line.splitlines()[0]
264 line = line.splitlines()[0]
266 missing.append(line[len(SKIPPED_PREFIX):])
265 missing.append(line[len(SKIPPED_PREFIX):])
267 elif line.startswith(FAILED_PREFIX):
266 elif line.startswith(FAILED_PREFIX):
268 line = line.splitlines()[0]
267 line = line.splitlines()[0]
269 failed.append(line[len(FAILED_PREFIX):])
268 failed.append(line[len(FAILED_PREFIX):])
270
269
271 return missing, failed
270 return missing, failed
272
271
273 def showdiff(expected, output, ref, err):
272 def showdiff(expected, output, ref, err):
274 for line in difflib.unified_diff(expected, output, ref, err):
273 for line in difflib.unified_diff(expected, output, ref, err):
275 sys.stdout.write(line)
274 sys.stdout.write(line)
276
275
277 def findprogram(program):
276 def findprogram(program):
278 """Search PATH for a executable program"""
277 """Search PATH for a executable program"""
279 for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
278 for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
280 name = os.path.join(p, program)
279 name = os.path.join(p, program)
281 if os.access(name, os.X_OK):
280 if os.access(name, os.X_OK):
282 return name
281 return name
283 return None
282 return None
284
283
285 def checktools():
284 def checktools():
286 # Before we go any further, check for pre-requisite tools
285 # Before we go any further, check for pre-requisite tools
287 # stuff from coreutils (cat, rm, etc) are not tested
286 # stuff from coreutils (cat, rm, etc) are not tested
288 for p in requiredtools:
287 for p in requiredtools:
289 if os.name == 'nt':
288 if os.name == 'nt':
290 p += '.exe'
289 p += '.exe'
291 found = findprogram(p)
290 found = findprogram(p)
292 if found:
291 if found:
293 vlog("# Found prerequisite", p, "at", found)
292 vlog("# Found prerequisite", p, "at", found)
294 else:
293 else:
295 print "WARNING: Did not find prerequisite tool: "+p
294 print "WARNING: Did not find prerequisite tool: "+p
296
295
297 def killdaemons():
296 def killdaemons():
298 # Kill off any leftover daemon processes
297 # Kill off any leftover daemon processes
299 try:
298 try:
300 fp = open(DAEMON_PIDS)
299 fp = open(DAEMON_PIDS)
301 for line in fp:
300 for line in fp:
302 try:
301 try:
303 pid = int(line)
302 pid = int(line)
304 except ValueError:
303 except ValueError:
305 continue
304 continue
306 try:
305 try:
307 os.kill(pid, 0)
306 os.kill(pid, 0)
308 vlog('# Killing daemon process %d' % pid)
307 vlog('# Killing daemon process %d' % pid)
309 os.kill(pid, signal.SIGTERM)
308 os.kill(pid, signal.SIGTERM)
310 time.sleep(0.25)
309 time.sleep(0.25)
311 os.kill(pid, 0)
310 os.kill(pid, 0)
312 vlog('# Daemon process %d is stuck - really killing it' % pid)
311 vlog('# Daemon process %d is stuck - really killing it' % pid)
313 os.kill(pid, signal.SIGKILL)
312 os.kill(pid, signal.SIGKILL)
314 except OSError, err:
313 except OSError, err:
315 if err.errno != errno.ESRCH:
314 if err.errno != errno.ESRCH:
316 raise
315 raise
317 fp.close()
316 fp.close()
318 os.unlink(DAEMON_PIDS)
317 os.unlink(DAEMON_PIDS)
319 except IOError:
318 except IOError:
320 pass
319 pass
321
320
322 def cleanup(options):
321 def cleanup(options):
323 if not options.keep_tmpdir:
322 if not options.keep_tmpdir:
324 vlog("# Cleaning up HGTMP", HGTMP)
323 vlog("# Cleaning up HGTMP", HGTMP)
325 shutil.rmtree(HGTMP, True)
324 shutil.rmtree(HGTMP, True)
326
325
327 def usecorrectpython():
326 def usecorrectpython():
328 # some tests run python interpreter. they must use same
327 # some tests run python interpreter. they must use same
329 # interpreter we use or bad things will happen.
328 # interpreter we use or bad things will happen.
330 exedir, exename = os.path.split(sys.executable)
329 exedir, exename = os.path.split(sys.executable)
331 if exename == 'python':
330 if exename == 'python':
332 path = findprogram('python')
331 path = findprogram('python')
333 if os.path.dirname(path) == exedir:
332 if os.path.dirname(path) == exedir:
334 return
333 return
335 vlog('# Making python executable in test path use correct Python')
334 vlog('# Making python executable in test path use correct Python')
336 mypython = os.path.join(BINDIR, 'python')
335 mypython = os.path.join(BINDIR, 'python')
337 try:
336 try:
338 os.symlink(sys.executable, mypython)
337 os.symlink(sys.executable, mypython)
339 except AttributeError:
338 except AttributeError:
340 # windows fallback
339 # windows fallback
341 shutil.copyfile(sys.executable, mypython)
340 shutil.copyfile(sys.executable, mypython)
342 shutil.copymode(sys.executable, mypython)
341 shutil.copymode(sys.executable, mypython)
343
342
344 def installhg(options):
343 def installhg(options):
345 vlog("# Performing temporary installation of HG")
344 vlog("# Performing temporary installation of HG")
346 installerrs = os.path.join("tests", "install.err")
345 installerrs = os.path.join("tests", "install.err")
347 pure = options.pure and "--pure" or ""
346 pure = options.pure and "--pure" or ""
348
347
349 # Run installer in hg root
348 # Run installer in hg root
350 script = os.path.realpath(sys.argv[0])
349 script = os.path.realpath(sys.argv[0])
351 hgroot = os.path.dirname(os.path.dirname(script))
350 hgroot = os.path.dirname(os.path.dirname(script))
352 os.chdir(hgroot)
351 os.chdir(hgroot)
353 nohome = '--home=""'
352 nohome = '--home=""'
354 if os.name == 'nt':
353 if os.name == 'nt':
355 # The --home="" trick works only on OS where os.sep == '/'
354 # The --home="" trick works only on OS where os.sep == '/'
356 # because of a distutils convert_path() fast-path. Avoid it at
355 # because of a distutils convert_path() fast-path. Avoid it at
357 # least on Windows for now, deal with .pydistutils.cfg bugs
356 # least on Windows for now, deal with .pydistutils.cfg bugs
358 # when they happen.
357 # when they happen.
359 nohome = ''
358 nohome = ''
360 cmd = ('%s setup.py %s clean --all'
359 cmd = ('%s setup.py %s clean --all'
361 ' install --force --prefix="%s" --install-lib="%s"'
360 ' install --force --prefix="%s" --install-lib="%s"'
362 ' --install-scripts="%s" %s >%s 2>&1'
361 ' --install-scripts="%s" %s >%s 2>&1'
363 % (sys.executable, pure, INST, PYTHONDIR, BINDIR, nohome,
362 % (sys.executable, pure, INST, PYTHONDIR, BINDIR, nohome,
364 installerrs))
363 installerrs))
365 vlog("# Running", cmd)
364 vlog("# Running", cmd)
366 if os.system(cmd) == 0:
365 if os.system(cmd) == 0:
367 if not options.verbose:
366 if not options.verbose:
368 os.remove(installerrs)
367 os.remove(installerrs)
369 else:
368 else:
370 f = open(installerrs)
369 f = open(installerrs)
371 for line in f:
370 for line in f:
372 print line,
371 print line,
373 f.close()
372 f.close()
374 sys.exit(1)
373 sys.exit(1)
375 os.chdir(TESTDIR)
374 os.chdir(TESTDIR)
376
375
377 usecorrectpython()
376 usecorrectpython()
378
377
379 vlog("# Installing dummy diffstat")
378 vlog("# Installing dummy diffstat")
380 f = open(os.path.join(BINDIR, 'diffstat'), 'w')
379 f = open(os.path.join(BINDIR, 'diffstat'), 'w')
381 f.write('#!' + sys.executable + '\n'
380 f.write('#!' + sys.executable + '\n'
382 'import sys\n'
381 'import sys\n'
383 'files = 0\n'
382 'files = 0\n'
384 'for line in sys.stdin:\n'
383 'for line in sys.stdin:\n'
385 ' if line.startswith("diff "):\n'
384 ' if line.startswith("diff "):\n'
386 ' files += 1\n'
385 ' files += 1\n'
387 'sys.stdout.write("files patched: %d\\n" % files)\n')
386 'sys.stdout.write("files patched: %d\\n" % files)\n')
388 f.close()
387 f.close()
389 os.chmod(os.path.join(BINDIR, 'diffstat'), 0700)
388 os.chmod(os.path.join(BINDIR, 'diffstat'), 0700)
390
389
391 if options.py3k_warnings and not options.anycoverage:
390 if options.py3k_warnings and not options.anycoverage:
392 vlog("# Updating hg command to enable Py3k Warnings switch")
391 vlog("# Updating hg command to enable Py3k Warnings switch")
393 f = open(os.path.join(BINDIR, 'hg'), 'r')
392 f = open(os.path.join(BINDIR, 'hg'), 'r')
394 lines = [line.rstrip() for line in f]
393 lines = [line.rstrip() for line in f]
395 lines[0] += ' -3'
394 lines[0] += ' -3'
396 f.close()
395 f.close()
397 f = open(os.path.join(BINDIR, 'hg'), 'w')
396 f = open(os.path.join(BINDIR, 'hg'), 'w')
398 for line in lines:
397 for line in lines:
399 f.write(line + '\n')
398 f.write(line + '\n')
400 f.close()
399 f.close()
401
400
402 if options.anycoverage:
401 if options.anycoverage:
403 custom = os.path.join(TESTDIR, 'sitecustomize.py')
402 custom = os.path.join(TESTDIR, 'sitecustomize.py')
404 target = os.path.join(PYTHONDIR, 'sitecustomize.py')
403 target = os.path.join(PYTHONDIR, 'sitecustomize.py')
405 vlog('# Installing coverage trigger to %s' % target)
404 vlog('# Installing coverage trigger to %s' % target)
406 shutil.copyfile(custom, target)
405 shutil.copyfile(custom, target)
407 rc = os.path.join(TESTDIR, '.coveragerc')
406 rc = os.path.join(TESTDIR, '.coveragerc')
408 vlog('# Installing coverage rc to %s' % rc)
407 vlog('# Installing coverage rc to %s' % rc)
409 os.environ['COVERAGE_PROCESS_START'] = rc
408 os.environ['COVERAGE_PROCESS_START'] = rc
410 fn = os.path.join(INST, '..', '.coverage')
409 fn = os.path.join(INST, '..', '.coverage')
411 os.environ['COVERAGE_FILE'] = fn
410 os.environ['COVERAGE_FILE'] = fn
412
411
413 def outputcoverage(options):
412 def outputcoverage(options):
414
413
415 vlog('# Producing coverage report')
414 vlog('# Producing coverage report')
416 os.chdir(PYTHONDIR)
415 os.chdir(PYTHONDIR)
417
416
418 def covrun(*args):
417 def covrun(*args):
419 cmd = 'coverage %s' % ' '.join(args)
418 cmd = 'coverage %s' % ' '.join(args)
420 vlog('# Running: %s' % cmd)
419 vlog('# Running: %s' % cmd)
421 os.system(cmd)
420 os.system(cmd)
422
421
423 if options.child:
422 if options.child:
424 return
423 return
425
424
426 covrun('-c')
425 covrun('-c')
427 omit = ','.join([BINDIR, TESTDIR])
426 omit = ','.join([BINDIR, TESTDIR])
428 covrun('-i', '-r', '"--omit=%s"' % omit) # report
427 covrun('-i', '-r', '"--omit=%s"' % omit) # report
429 if options.annotate:
428 if options.annotate:
430 adir = os.path.join(TESTDIR, 'annotated')
429 adir = os.path.join(TESTDIR, 'annotated')
431 if not os.path.isdir(adir):
430 if not os.path.isdir(adir):
432 os.mkdir(adir)
431 os.mkdir(adir)
433 covrun('-i', '-a', '"--directory=%s"' % adir, '"--omit=%s"' % omit)
432 covrun('-i', '-a', '"--directory=%s"' % adir, '"--omit=%s"' % omit)
434
433
435 class Timeout(Exception):
434 class Timeout(Exception):
436 pass
435 pass
437
436
438 def alarmed(signum, frame):
437 def alarmed(signum, frame):
439 raise Timeout
438 raise Timeout
440
439
441 def run(cmd, options):
440 def run(cmd, options):
442 """Run command in a sub-process, capturing the output (stdout and stderr).
441 """Run command in a sub-process, capturing the output (stdout and stderr).
443 Return a tuple (exitcode, output). output is None in debug mode."""
442 Return a tuple (exitcode, output). output is None in debug mode."""
444 # TODO: Use subprocess.Popen if we're running on Python 2.4
443 # TODO: Use subprocess.Popen if we're running on Python 2.4
445 if options.debug:
444 if options.debug:
446 proc = subprocess.Popen(cmd, shell=True)
445 proc = subprocess.Popen(cmd, shell=True)
447 ret = proc.wait()
446 ret = proc.wait()
448 return (ret, None)
447 return (ret, None)
449
448
450 if os.name == 'nt' or sys.platform.startswith('java'):
449 if os.name == 'nt' or sys.platform.startswith('java'):
451 tochild, fromchild = os.popen4(cmd)
450 tochild, fromchild = os.popen4(cmd)
452 tochild.close()
451 tochild.close()
453 output = fromchild.read()
452 output = fromchild.read()
454 ret = fromchild.close()
453 ret = fromchild.close()
455 if ret == None:
454 if ret == None:
456 ret = 0
455 ret = 0
457 else:
456 else:
458 proc = Popen4(cmd)
457 proc = Popen4(cmd)
459 def cleanup():
458 def cleanup():
460 os.kill(proc.pid, signal.SIGTERM)
459 os.kill(proc.pid, signal.SIGTERM)
461 ret = proc.wait()
460 ret = proc.wait()
462 if ret == 0:
461 if ret == 0:
463 ret = signal.SIGTERM << 8
462 ret = signal.SIGTERM << 8
464 killdaemons()
463 killdaemons()
465 return ret
464 return ret
466
465
467 try:
466 try:
468 output = ''
467 output = ''
469 proc.tochild.close()
468 proc.tochild.close()
470 output = proc.fromchild.read()
469 output = proc.fromchild.read()
471 ret = proc.wait()
470 ret = proc.wait()
472 if os.WIFEXITED(ret):
471 if os.WIFEXITED(ret):
473 ret = os.WEXITSTATUS(ret)
472 ret = os.WEXITSTATUS(ret)
474 except Timeout:
473 except Timeout:
475 vlog('# Process %d timed out - killing it' % proc.pid)
474 vlog('# Process %d timed out - killing it' % proc.pid)
476 ret = cleanup()
475 ret = cleanup()
477 output += ("\n### Abort: timeout after %d seconds.\n"
476 output += ("\n### Abort: timeout after %d seconds.\n"
478 % options.timeout)
477 % options.timeout)
479 except KeyboardInterrupt:
478 except KeyboardInterrupt:
480 vlog('# Handling keyboard interrupt')
479 vlog('# Handling keyboard interrupt')
481 cleanup()
480 cleanup()
482 raise
481 raise
483
482
484 return ret, splitnewlines(output)
483 return ret, splitnewlines(output)
485
484
486 def runone(options, test, skips, fails):
485 def runone(options, test, skips, fails):
487 '''tristate output:
486 '''tristate output:
488 None -> skipped
487 None -> skipped
489 True -> passed
488 True -> passed
490 False -> failed'''
489 False -> failed'''
491
490
492 def skip(msg):
491 def skip(msg):
493 if not options.verbose:
492 if not options.verbose:
494 skips.append((test, msg))
493 skips.append((test, msg))
495 else:
494 else:
496 print "\nSkipping %s: %s" % (testpath, msg)
495 print "\nSkipping %s: %s" % (testpath, msg)
497 return None
496 return None
498
497
499 def fail(msg):
498 def fail(msg):
500 fails.append((test, msg))
499 fails.append((test, msg))
501 if not options.nodiff:
500 if not options.nodiff:
502 print "\nERROR: %s %s" % (testpath, msg)
501 print "\nERROR: %s %s" % (testpath, msg)
503 return None
502 return None
504
503
505 vlog("# Test", test)
504 vlog("# Test", test)
506
505
507 # create a fresh hgrc
506 # create a fresh hgrc
508 hgrc = open(HGRCPATH, 'w+')
507 hgrc = open(HGRCPATH, 'w+')
509 hgrc.write('[ui]\n')
508 hgrc.write('[ui]\n')
510 hgrc.write('slash = True\n')
509 hgrc.write('slash = True\n')
511 hgrc.write('[defaults]\n')
510 hgrc.write('[defaults]\n')
512 hgrc.write('backout = -d "0 0"\n')
511 hgrc.write('backout = -d "0 0"\n')
513 hgrc.write('commit = -d "0 0"\n')
512 hgrc.write('commit = -d "0 0"\n')
514 hgrc.write('tag = -d "0 0"\n')
513 hgrc.write('tag = -d "0 0"\n')
515 if options.inotify:
514 if options.inotify:
516 hgrc.write('[extensions]\n')
515 hgrc.write('[extensions]\n')
517 hgrc.write('inotify=\n')
516 hgrc.write('inotify=\n')
518 hgrc.write('[inotify]\n')
517 hgrc.write('[inotify]\n')
519 hgrc.write('pidfile=%s\n' % DAEMON_PIDS)
518 hgrc.write('pidfile=%s\n' % DAEMON_PIDS)
520 hgrc.write('appendpid=True\n')
519 hgrc.write('appendpid=True\n')
521 hgrc.close()
520 hgrc.close()
522
521
523 testpath = os.path.join(TESTDIR, test)
522 testpath = os.path.join(TESTDIR, test)
524 ref = os.path.join(TESTDIR, test+".out")
523 ref = os.path.join(TESTDIR, test+".out")
525 err = os.path.join(TESTDIR, test+".err")
524 err = os.path.join(TESTDIR, test+".err")
526 if os.path.exists(err):
525 if os.path.exists(err):
527 os.remove(err) # Remove any previous output files
526 os.remove(err) # Remove any previous output files
528 try:
527 try:
529 tf = open(testpath)
528 tf = open(testpath)
530 firstline = tf.readline().rstrip()
529 firstline = tf.readline().rstrip()
531 tf.close()
530 tf.close()
532 except:
531 except:
533 firstline = ''
532 firstline = ''
534 lctest = test.lower()
533 lctest = test.lower()
535
534
536 if lctest.endswith('.py') or firstline == '#!/usr/bin/env python':
535 if lctest.endswith('.py') or firstline == '#!/usr/bin/env python':
537 py3kswitch = options.py3k_warnings and ' -3' or ''
536 py3kswitch = options.py3k_warnings and ' -3' or ''
538 cmd = '%s%s "%s"' % (PYTHON, py3kswitch, testpath)
537 cmd = '%s%s "%s"' % (PYTHON, py3kswitch, testpath)
539 elif lctest.endswith('.bat'):
538 elif lctest.endswith('.bat'):
540 # do not run batch scripts on non-windows
539 # do not run batch scripts on non-windows
541 if os.name != 'nt':
540 if os.name != 'nt':
542 return skip("batch script")
541 return skip("batch script")
543 # To reliably get the error code from batch files on WinXP,
542 # To reliably get the error code from batch files on WinXP,
544 # the "cmd /c call" prefix is needed. Grrr
543 # the "cmd /c call" prefix is needed. Grrr
545 cmd = 'cmd /c call "%s"' % testpath
544 cmd = 'cmd /c call "%s"' % testpath
546 else:
545 else:
547 # do not run shell scripts on windows
546 # do not run shell scripts on windows
548 if os.name == 'nt':
547 if os.name == 'nt':
549 return skip("shell script")
548 return skip("shell script")
550 # do not try to run non-executable programs
549 # do not try to run non-executable programs
551 if not os.path.exists(testpath):
550 if not os.path.exists(testpath):
552 return fail("does not exist")
551 return fail("does not exist")
553 elif not os.access(testpath, os.X_OK):
552 elif not os.access(testpath, os.X_OK):
554 return skip("not executable")
553 return skip("not executable")
555 cmd = '"%s"' % testpath
554 cmd = '"%s"' % testpath
556
555
557 # Make a tmp subdirectory to work in
556 # Make a tmp subdirectory to work in
558 tmpd = os.path.join(HGTMP, test)
557 tmpd = os.path.join(HGTMP, test)
559 os.mkdir(tmpd)
558 os.mkdir(tmpd)
560 os.chdir(tmpd)
559 os.chdir(tmpd)
561
560
562 if options.timeout > 0:
561 if options.timeout > 0:
563 signal.alarm(options.timeout)
562 signal.alarm(options.timeout)
564
563
565 vlog("# Running", cmd)
564 vlog("# Running", cmd)
566 ret, out = run(cmd, options)
565 ret, out = run(cmd, options)
567 vlog("# Ret was:", ret)
566 vlog("# Ret was:", ret)
568
567
569 if options.timeout > 0:
568 if options.timeout > 0:
570 signal.alarm(0)
569 signal.alarm(0)
571
570
572 mark = '.'
571 mark = '.'
573
572
574 skipped = (ret == SKIPPED_STATUS)
573 skipped = (ret == SKIPPED_STATUS)
575 # If we're not in --debug mode and reference output file exists,
574 # If we're not in --debug mode and reference output file exists,
576 # check test output against it.
575 # check test output against it.
577 if options.debug:
576 if options.debug:
578 refout = None # to match out == None
577 refout = None # to match out == None
579 elif os.path.exists(ref):
578 elif os.path.exists(ref):
580 f = open(ref, "r")
579 f = open(ref, "r")
581 refout = splitnewlines(f.read())
580 refout = splitnewlines(f.read())
582 f.close()
581 f.close()
583 else:
582 else:
584 refout = []
583 refout = []
585
584
586 if skipped:
585 if skipped:
587 mark = 's'
586 mark = 's'
588 if out is None: # debug mode: nothing to parse
587 if out is None: # debug mode: nothing to parse
589 missing = ['unknown']
588 missing = ['unknown']
590 failed = None
589 failed = None
591 else:
590 else:
592 missing, failed = parsehghaveoutput(out)
591 missing, failed = parsehghaveoutput(out)
593 if not missing:
592 if not missing:
594 missing = ['irrelevant']
593 missing = ['irrelevant']
595 if failed:
594 if failed:
596 fail("hghave failed checking for %s" % failed[-1])
595 fail("hghave failed checking for %s" % failed[-1])
597 skipped = False
596 skipped = False
598 else:
597 else:
599 skip(missing[-1])
598 skip(missing[-1])
600 elif out != refout:
599 elif out != refout:
601 mark = '!'
600 mark = '!'
602 if ret:
601 if ret:
603 fail("output changed and returned error code %d" % ret)
602 fail("output changed and returned error code %d" % ret)
604 else:
603 else:
605 fail("output changed")
604 fail("output changed")
606 if not options.nodiff:
605 if not options.nodiff:
607 showdiff(refout, out, ref, err)
606 showdiff(refout, out, ref, err)
608 ret = 1
607 ret = 1
609 elif ret:
608 elif ret:
610 mark = '!'
609 mark = '!'
611 fail("returned error code %d" % ret)
610 fail("returned error code %d" % ret)
612
611
613 if not options.verbose:
612 if not options.verbose:
614 sys.stdout.write(mark)
613 sys.stdout.write(mark)
615 sys.stdout.flush()
614 sys.stdout.flush()
616
615
617 if ret != 0 and not skipped and not options.debug:
616 if ret != 0 and not skipped and not options.debug:
618 # Save errors to a file for diagnosis
617 # Save errors to a file for diagnosis
619 f = open(err, "wb")
618 f = open(err, "wb")
620 for line in out:
619 for line in out:
621 f.write(line)
620 f.write(line)
622 f.close()
621 f.close()
623
622
624 killdaemons()
623 killdaemons()
625
624
626 os.chdir(TESTDIR)
625 os.chdir(TESTDIR)
627 if not options.keep_tmpdir:
626 if not options.keep_tmpdir:
628 shutil.rmtree(tmpd, True)
627 shutil.rmtree(tmpd, True)
629 if skipped:
628 if skipped:
630 return None
629 return None
631 return ret == 0
630 return ret == 0
632
631
633 _hgpath = None
632 _hgpath = None
634
633
635 def _gethgpath():
634 def _gethgpath():
636 """Return the path to the mercurial package that is actually found by
635 """Return the path to the mercurial package that is actually found by
637 the current Python interpreter."""
636 the current Python interpreter."""
638 global _hgpath
637 global _hgpath
639 if _hgpath is not None:
638 if _hgpath is not None:
640 return _hgpath
639 return _hgpath
641
640
642 cmd = '%s -c "import mercurial; print mercurial.__path__[0]"'
641 cmd = '%s -c "import mercurial; print mercurial.__path__[0]"'
643 pipe = os.popen(cmd % PYTHON)
642 pipe = os.popen(cmd % PYTHON)
644 try:
643 try:
645 _hgpath = pipe.read().strip()
644 _hgpath = pipe.read().strip()
646 finally:
645 finally:
647 pipe.close()
646 pipe.close()
648 return _hgpath
647 return _hgpath
649
648
650 def _checkhglib(verb):
649 def _checkhglib(verb):
651 """Ensure that the 'mercurial' package imported by python is
650 """Ensure that the 'mercurial' package imported by python is
652 the one we expect it to be. If not, print a warning to stderr."""
651 the one we expect it to be. If not, print a warning to stderr."""
653 expecthg = os.path.join(PYTHONDIR, 'mercurial')
652 expecthg = os.path.join(PYTHONDIR, 'mercurial')
654 actualhg = _gethgpath()
653 actualhg = _gethgpath()
655 if actualhg != expecthg:
654 if actualhg != expecthg:
656 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
655 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
657 ' (expected %s)\n'
656 ' (expected %s)\n'
658 % (verb, actualhg, expecthg))
657 % (verb, actualhg, expecthg))
659
658
660 def runchildren(options, tests):
659 def runchildren(options, tests):
661 if INST:
660 if INST:
662 installhg(options)
661 installhg(options)
663 _checkhglib("Testing")
662 _checkhglib("Testing")
664
663
665 optcopy = dict(options.__dict__)
664 optcopy = dict(options.__dict__)
666 optcopy['jobs'] = 1
665 optcopy['jobs'] = 1
667 if optcopy['with_hg'] is None:
666 if optcopy['with_hg'] is None:
668 optcopy['with_hg'] = os.path.join(BINDIR, "hg")
667 optcopy['with_hg'] = os.path.join(BINDIR, "hg")
669 optcopy.pop('anycoverage', None)
668 optcopy.pop('anycoverage', None)
670
669
671 opts = []
670 opts = []
672 for opt, value in optcopy.iteritems():
671 for opt, value in optcopy.iteritems():
673 name = '--' + opt.replace('_', '-')
672 name = '--' + opt.replace('_', '-')
674 if value is True:
673 if value is True:
675 opts.append(name)
674 opts.append(name)
676 elif value is not None:
675 elif value is not None:
677 opts.append(name + '=' + str(value))
676 opts.append(name + '=' + str(value))
678
677
679 tests.reverse()
678 tests.reverse()
680 jobs = [[] for j in xrange(options.jobs)]
679 jobs = [[] for j in xrange(options.jobs)]
681 while tests:
680 while tests:
682 for job in jobs:
681 for job in jobs:
683 if not tests:
682 if not tests:
684 break
683 break
685 job.append(tests.pop())
684 job.append(tests.pop())
686 fps = {}
685 fps = {}
687
686
688 for j, job in enumerate(jobs):
687 for j, job in enumerate(jobs):
689 if not job:
688 if not job:
690 continue
689 continue
691 rfd, wfd = os.pipe()
690 rfd, wfd = os.pipe()
692 childopts = ['--child=%d' % wfd, '--port=%d' % (options.port + j * 3)]
691 childopts = ['--child=%d' % wfd, '--port=%d' % (options.port + j * 3)]
693 childtmp = os.path.join(HGTMP, 'child%d' % j)
692 childtmp = os.path.join(HGTMP, 'child%d' % j)
694 childopts += ['--tmpdir', childtmp]
693 childopts += ['--tmpdir', childtmp]
695 cmdline = [PYTHON, sys.argv[0]] + opts + childopts + job
694 cmdline = [PYTHON, sys.argv[0]] + opts + childopts + job
696 vlog(' '.join(cmdline))
695 vlog(' '.join(cmdline))
697 fps[os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)] = os.fdopen(rfd, 'r')
696 fps[os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)] = os.fdopen(rfd, 'r')
698 os.close(wfd)
697 os.close(wfd)
699 signal.signal(signal.SIGINT, signal.SIG_IGN)
698 signal.signal(signal.SIGINT, signal.SIG_IGN)
700 failures = 0
699 failures = 0
701 tested, skipped, failed = 0, 0, 0
700 tested, skipped, failed = 0, 0, 0
702 skips = []
701 skips = []
703 fails = []
702 fails = []
704 while fps:
703 while fps:
705 pid, status = os.wait()
704 pid, status = os.wait()
706 fp = fps.pop(pid)
705 fp = fps.pop(pid)
707 l = fp.read().splitlines()
706 l = fp.read().splitlines()
708 try:
707 try:
709 test, skip, fail = map(int, l[:3])
708 test, skip, fail = map(int, l[:3])
710 except ValueError:
709 except ValueError:
711 test, skip, fail = 0, 0, 0
710 test, skip, fail = 0, 0, 0
712 split = -fail or len(l)
711 split = -fail or len(l)
713 for s in l[3:split]:
712 for s in l[3:split]:
714 skips.append(s.split(" ", 1))
713 skips.append(s.split(" ", 1))
715 for s in l[split:]:
714 for s in l[split:]:
716 fails.append(s.split(" ", 1))
715 fails.append(s.split(" ", 1))
717 tested += test
716 tested += test
718 skipped += skip
717 skipped += skip
719 failed += fail
718 failed += fail
720 vlog('pid %d exited, status %d' % (pid, status))
719 vlog('pid %d exited, status %d' % (pid, status))
721 failures |= status
720 failures |= status
722 print
721 print
723 if not options.noskips:
722 if not options.noskips:
724 for s in skips:
723 for s in skips:
725 print "Skipped %s: %s" % (s[0], s[1])
724 print "Skipped %s: %s" % (s[0], s[1])
726 for s in fails:
725 for s in fails:
727 print "Failed %s: %s" % (s[0], s[1])
726 print "Failed %s: %s" % (s[0], s[1])
728
727
729 _checkhglib("Tested")
728 _checkhglib("Tested")
730 print "# Ran %d tests, %d skipped, %d failed." % (
729 print "# Ran %d tests, %d skipped, %d failed." % (
731 tested, skipped, failed)
730 tested, skipped, failed)
732
731
733 if options.anycoverage:
732 if options.anycoverage:
734 outputcoverage(options)
733 outputcoverage(options)
735 sys.exit(failures != 0)
734 sys.exit(failures != 0)
736
735
737 def runtests(options, tests):
736 def runtests(options, tests):
738 global DAEMON_PIDS, HGRCPATH
737 global DAEMON_PIDS, HGRCPATH
739 DAEMON_PIDS = os.environ["DAEMON_PIDS"] = os.path.join(HGTMP, 'daemon.pids')
738 DAEMON_PIDS = os.environ["DAEMON_PIDS"] = os.path.join(HGTMP, 'daemon.pids')
740 HGRCPATH = os.environ["HGRCPATH"] = os.path.join(HGTMP, '.hgrc')
739 HGRCPATH = os.environ["HGRCPATH"] = os.path.join(HGTMP, '.hgrc')
741
740
742 try:
741 try:
743 if INST:
742 if INST:
744 installhg(options)
743 installhg(options)
745 _checkhglib("Testing")
744 _checkhglib("Testing")
746
745
747 if options.timeout > 0:
746 if options.timeout > 0:
748 try:
747 try:
749 signal.signal(signal.SIGALRM, alarmed)
748 signal.signal(signal.SIGALRM, alarmed)
750 vlog('# Running each test with %d second timeout' %
749 vlog('# Running each test with %d second timeout' %
751 options.timeout)
750 options.timeout)
752 except AttributeError:
751 except AttributeError:
753 print 'WARNING: cannot run tests with timeouts'
752 print 'WARNING: cannot run tests with timeouts'
754 options.timeout = 0
753 options.timeout = 0
755
754
756 tested = 0
755 tested = 0
757 failed = 0
756 failed = 0
758 skipped = 0
757 skipped = 0
759
758
760 if options.restart:
759 if options.restart:
761 orig = list(tests)
760 orig = list(tests)
762 while tests:
761 while tests:
763 if os.path.exists(tests[0] + ".err"):
762 if os.path.exists(tests[0] + ".err"):
764 break
763 break
765 tests.pop(0)
764 tests.pop(0)
766 if not tests:
765 if not tests:
767 print "running all tests"
766 print "running all tests"
768 tests = orig
767 tests = orig
769
768
770 skips = []
769 skips = []
771 fails = []
770 fails = []
772
771
773 for test in tests:
772 for test in tests:
774 if options.blacklist:
773 if options.blacklist:
775 filename = options.blacklist.get(test)
774 filename = options.blacklist.get(test)
776 if filename is not None:
775 if filename is not None:
777 skips.append((test, "blacklisted (%s)" % filename))
776 skips.append((test, "blacklisted (%s)" % filename))
778 skipped += 1
777 skipped += 1
779 continue
778 continue
780
779
781 if options.retest and not os.path.exists(test + ".err"):
780 if options.retest and not os.path.exists(test + ".err"):
782 skipped += 1
781 skipped += 1
783 continue
782 continue
784
783
785 if options.keywords:
784 if options.keywords:
786 t = open(test).read().lower() + test.lower()
785 t = open(test).read().lower() + test.lower()
787 for k in options.keywords.lower().split():
786 for k in options.keywords.lower().split():
788 if k in t:
787 if k in t:
789 break
788 break
790 else:
789 else:
791 skipped += 1
790 skipped += 1
792 continue
791 continue
793
792
794 ret = runone(options, test, skips, fails)
793 ret = runone(options, test, skips, fails)
795 if ret is None:
794 if ret is None:
796 skipped += 1
795 skipped += 1
797 elif not ret:
796 elif not ret:
798 if options.interactive:
797 if options.interactive:
799 print "Accept this change? [n] ",
798 print "Accept this change? [n] ",
800 answer = sys.stdin.readline().strip()
799 answer = sys.stdin.readline().strip()
801 if answer.lower() in "y yes".split():
800 if answer.lower() in "y yes".split():
802 rename(test + ".err", test + ".out")
801 rename(test + ".err", test + ".out")
803 tested += 1
802 tested += 1
804 fails.pop()
803 fails.pop()
805 continue
804 continue
806 failed += 1
805 failed += 1
807 if options.first:
806 if options.first:
808 break
807 break
809 tested += 1
808 tested += 1
810
809
811 if options.child:
810 if options.child:
812 fp = os.fdopen(options.child, 'w')
811 fp = os.fdopen(options.child, 'w')
813 fp.write('%d\n%d\n%d\n' % (tested, skipped, failed))
812 fp.write('%d\n%d\n%d\n' % (tested, skipped, failed))
814 for s in skips:
813 for s in skips:
815 fp.write("%s %s\n" % s)
814 fp.write("%s %s\n" % s)
816 for s in fails:
815 for s in fails:
817 fp.write("%s %s\n" % s)
816 fp.write("%s %s\n" % s)
818 fp.close()
817 fp.close()
819 else:
818 else:
820 print
819 print
821 for s in skips:
820 for s in skips:
822 print "Skipped %s: %s" % s
821 print "Skipped %s: %s" % s
823 for s in fails:
822 for s in fails:
824 print "Failed %s: %s" % s
823 print "Failed %s: %s" % s
825 _checkhglib("Tested")
824 _checkhglib("Tested")
826 print "# Ran %d tests, %d skipped, %d failed." % (
825 print "# Ran %d tests, %d skipped, %d failed." % (
827 tested, skipped, failed)
826 tested, skipped, failed)
828
827
829 if options.anycoverage:
828 if options.anycoverage:
830 outputcoverage(options)
829 outputcoverage(options)
831 except KeyboardInterrupt:
830 except KeyboardInterrupt:
832 failed = True
831 failed = True
833 print "\ninterrupted!"
832 print "\ninterrupted!"
834
833
835 if failed:
834 if failed:
836 sys.exit(1)
835 sys.exit(1)
837
836
838 def main():
837 def main():
839 (options, args) = parseargs()
838 (options, args) = parseargs()
840 if not options.child:
839 if not options.child:
841 os.umask(022)
840 os.umask(022)
842
841
843 checktools()
842 checktools()
844
843
845 # Reset some environment variables to well-known values so that
844 # Reset some environment variables to well-known values so that
846 # the tests produce repeatable output.
845 # the tests produce repeatable output.
847 os.environ['LANG'] = os.environ['LC_ALL'] = os.environ['LANGUAGE'] = 'C'
846 os.environ['LANG'] = os.environ['LC_ALL'] = os.environ['LANGUAGE'] = 'C'
848 os.environ['TZ'] = 'GMT'
847 os.environ['TZ'] = 'GMT'
849 os.environ["EMAIL"] = "Foo Bar <foo.bar@example.com>"
848 os.environ["EMAIL"] = "Foo Bar <foo.bar@example.com>"
850 os.environ['CDPATH'] = ''
849 os.environ['CDPATH'] = ''
851 os.environ['COLUMNS'] = '80'
850 os.environ['COLUMNS'] = '80'
852 os.environ['GREP_OPTIONS'] = ''
851 os.environ['GREP_OPTIONS'] = ''
853 os.environ['http_proxy'] = ''
852 os.environ['http_proxy'] = ''
854
853
855 # unset env related to hooks
854 # unset env related to hooks
856 for k in os.environ.keys():
855 for k in os.environ.keys():
857 if k.startswith('HG_'):
856 if k.startswith('HG_'):
858 del os.environ[k]
857 del os.environ[k]
859
858
860 global TESTDIR, HGTMP, INST, BINDIR, PYTHONDIR, COVERAGE_FILE
859 global TESTDIR, HGTMP, INST, BINDIR, PYTHONDIR, COVERAGE_FILE
861 TESTDIR = os.environ["TESTDIR"] = os.getcwd()
860 TESTDIR = os.environ["TESTDIR"] = os.getcwd()
862 if options.tmpdir:
861 if options.tmpdir:
863 options.keep_tmpdir = True
862 options.keep_tmpdir = True
864 tmpdir = options.tmpdir
863 tmpdir = options.tmpdir
865 if os.path.exists(tmpdir):
864 if os.path.exists(tmpdir):
866 # Meaning of tmpdir has changed since 1.3: we used to create
865 # Meaning of tmpdir has changed since 1.3: we used to create
867 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
866 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
868 # tmpdir already exists.
867 # tmpdir already exists.
869 sys.exit("error: temp dir %r already exists" % tmpdir)
868 sys.exit("error: temp dir %r already exists" % tmpdir)
870
869
871 # Automatically removing tmpdir sounds convenient, but could
870 # Automatically removing tmpdir sounds convenient, but could
872 # really annoy anyone in the habit of using "--tmpdir=/tmp"
871 # really annoy anyone in the habit of using "--tmpdir=/tmp"
873 # or "--tmpdir=$HOME".
872 # or "--tmpdir=$HOME".
874 #vlog("# Removing temp dir", tmpdir)
873 #vlog("# Removing temp dir", tmpdir)
875 #shutil.rmtree(tmpdir)
874 #shutil.rmtree(tmpdir)
876 os.makedirs(tmpdir)
875 os.makedirs(tmpdir)
877 else:
876 else:
878 tmpdir = tempfile.mkdtemp('', 'hgtests.')
877 tmpdir = tempfile.mkdtemp('', 'hgtests.')
879 HGTMP = os.environ['HGTMP'] = os.path.realpath(tmpdir)
878 HGTMP = os.environ['HGTMP'] = os.path.realpath(tmpdir)
880 DAEMON_PIDS = None
879 DAEMON_PIDS = None
881 HGRCPATH = None
880 HGRCPATH = None
882
881
883 os.environ["HGEDITOR"] = sys.executable + ' -c "import sys; sys.exit(0)"'
882 os.environ["HGEDITOR"] = sys.executable + ' -c "import sys; sys.exit(0)"'
884 os.environ["HGMERGE"] = "internal:merge"
883 os.environ["HGMERGE"] = "internal:merge"
885 os.environ["HGUSER"] = "test"
884 os.environ["HGUSER"] = "test"
886 os.environ["HGENCODING"] = "ascii"
885 os.environ["HGENCODING"] = "ascii"
887 os.environ["HGENCODINGMODE"] = "strict"
886 os.environ["HGENCODINGMODE"] = "strict"
888 os.environ["HGPORT"] = str(options.port)
887 os.environ["HGPORT"] = str(options.port)
889 os.environ["HGPORT1"] = str(options.port + 1)
888 os.environ["HGPORT1"] = str(options.port + 1)
890 os.environ["HGPORT2"] = str(options.port + 2)
889 os.environ["HGPORT2"] = str(options.port + 2)
891
890
892 if options.with_hg:
891 if options.with_hg:
893 INST = None
892 INST = None
894 BINDIR = os.path.dirname(os.path.realpath(options.with_hg))
893 BINDIR = os.path.dirname(os.path.realpath(options.with_hg))
895
894
896 # This looks redundant with how Python initializes sys.path from
895 # This looks redundant with how Python initializes sys.path from
897 # the location of the script being executed. Needed because the
896 # the location of the script being executed. Needed because the
898 # "hg" specified by --with-hg is not the only Python script
897 # "hg" specified by --with-hg is not the only Python script
899 # executed in the test suite that needs to import 'mercurial'
898 # executed in the test suite that needs to import 'mercurial'
900 # ... which means it's not really redundant at all.
899 # ... which means it's not really redundant at all.
901 PYTHONDIR = BINDIR
900 PYTHONDIR = BINDIR
902 else:
901 else:
903 INST = os.path.join(HGTMP, "install")
902 INST = os.path.join(HGTMP, "install")
904 BINDIR = os.environ["BINDIR"] = os.path.join(INST, "bin")
903 BINDIR = os.environ["BINDIR"] = os.path.join(INST, "bin")
905 PYTHONDIR = os.path.join(INST, "lib", "python")
904 PYTHONDIR = os.path.join(INST, "lib", "python")
906
905
907 os.environ["BINDIR"] = BINDIR
906 os.environ["BINDIR"] = BINDIR
908 os.environ["PYTHON"] = PYTHON
907 os.environ["PYTHON"] = PYTHON
909
908
910 if not options.child:
909 if not options.child:
911 path = [BINDIR] + os.environ["PATH"].split(os.pathsep)
910 path = [BINDIR] + os.environ["PATH"].split(os.pathsep)
912 os.environ["PATH"] = os.pathsep.join(path)
911 os.environ["PATH"] = os.pathsep.join(path)
913
912
914 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
913 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
915 # can run .../tests/run-tests.py test-foo where test-foo
914 # can run .../tests/run-tests.py test-foo where test-foo
916 # adds an extension to HGRC
915 # adds an extension to HGRC
917 pypath = [PYTHONDIR, TESTDIR]
916 pypath = [PYTHONDIR, TESTDIR]
918 # We have to augment PYTHONPATH, rather than simply replacing
917 # We have to augment PYTHONPATH, rather than simply replacing
919 # it, in case external libraries are only available via current
918 # it, in case external libraries are only available via current
920 # PYTHONPATH. (In particular, the Subversion bindings on OS X
919 # PYTHONPATH. (In particular, the Subversion bindings on OS X
921 # are in /opt/subversion.)
920 # are in /opt/subversion.)
922 oldpypath = os.environ.get(IMPL_PATH)
921 oldpypath = os.environ.get(IMPL_PATH)
923 if oldpypath:
922 if oldpypath:
924 pypath.append(oldpypath)
923 pypath.append(oldpypath)
925 os.environ[IMPL_PATH] = os.pathsep.join(pypath)
924 os.environ[IMPL_PATH] = os.pathsep.join(pypath)
926
925
927 COVERAGE_FILE = os.path.join(TESTDIR, ".coverage")
926 COVERAGE_FILE = os.path.join(TESTDIR, ".coverage")
928
927
929 if len(args) == 0:
928 if len(args) == 0:
930 args = os.listdir(".")
929 args = os.listdir(".")
931 args.sort()
930 args.sort()
932
931
933 tests = []
932 tests = []
934 for test in args:
933 for test in args:
935 if (test.startswith("test-") and '~' not in test and
934 if (test.startswith("test-") and '~' not in test and
936 ('.' not in test or test.endswith('.py') or
935 ('.' not in test or test.endswith('.py') or
937 test.endswith('.bat'))):
936 test.endswith('.bat'))):
938 tests.append(test)
937 tests.append(test)
939 if not tests:
938 if not tests:
940 print "# Ran 0 tests, 0 skipped, 0 failed."
939 print "# Ran 0 tests, 0 skipped, 0 failed."
941 return
940 return
942
941
943 vlog("# Using TESTDIR", TESTDIR)
942 vlog("# Using TESTDIR", TESTDIR)
944 vlog("# Using HGTMP", HGTMP)
943 vlog("# Using HGTMP", HGTMP)
945 vlog("# Using PATH", os.environ["PATH"])
944 vlog("# Using PATH", os.environ["PATH"])
946 vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])
945 vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])
947
946
948 try:
947 try:
949 if len(tests) > 1 and options.jobs > 1:
948 if len(tests) > 1 and options.jobs > 1:
950 runchildren(options, tests)
949 runchildren(options, tests)
951 else:
950 else:
952 runtests(options, tests)
951 runtests(options, tests)
953 finally:
952 finally:
954 time.sleep(1)
953 time.sleep(1)
955 cleanup(options)
954 cleanup(options)
956
955
957 main()
956 main()
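The constants near the top of the file reserve exit code 80 (SKIPPED_STATUS) and the 'skipped: ' / 'hghave check failed: ' prefixes as the channel through which a test tells runone() that it was skipped rather than failed. As an illustrative aside, and not part of the commit shown above, a minimal Python test that this runner would report as skipped could look like the following sketch (the file name is hypothetical):

    # test-example.py (hypothetical): signal a skip back to run-tests.py
    import sys
    # a SKIPPED_PREFIX line tells parsehghaveoutput() which feature was missing
    print 'skipped: missing some-feature on this platform'
    # exiting with SKIPPED_STATUS (80) makes runone() print 's' instead of '!'
    sys.exit(80)

In the shell tests this exit code and these prefixes are normally produced by hghave, which is why the comment above describes the status as reserved for it.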
@@ -1,32 +1,31 b''
1 #!/usr/bin/python
1 #!/usr/bin/python
2 import os
3 from mercurial.ui import ui
2 from mercurial.ui import ui
4 from mercurial.localrepo import localrepository
3 from mercurial.localrepo import localrepository
5 from mercurial.commands import add, commit, status
4 from mercurial.commands import add, commit, status
6
5
7 u = ui()
6 u = ui()
8
7
9 print '% creating repo'
8 print '% creating repo'
10 repo = localrepository(u, '.', create=True)
9 repo = localrepository(u, '.', create=True)
11
10
12 f = open('test.py', 'w')
11 f = open('test.py', 'w')
13 try:
12 try:
14 f.write('foo\n')
13 f.write('foo\n')
15 finally:
14 finally:
16 f.close()
15 f.close()
17
16
18 print '% add and commit'
17 print '% add and commit'
19 add(u, repo, 'test.py')
18 add(u, repo, 'test.py')
20 commit(u, repo, message='*')
19 commit(u, repo, message='*')
21 status(u, repo, clean=True)
20 status(u, repo, clean=True)
22
21
23
22
24 print '% change'
23 print '% change'
25 f = open('test.py', 'w')
24 f = open('test.py', 'w')
26 try:
25 try:
27 f.write('bar\n')
26 f.write('bar\n')
28 finally:
27 finally:
29 f.close()
28 f.close()
30
29
31 # this would return clean instead of changed before the fix
30 # this would return clean instead of changed before the fix
32 status(u, repo, clean=True, modified=True)
31 status(u, repo, clean=True, modified=True)
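Because the script above ends in .py, run-tests.py invokes it with the same Python interpreter it is running under, captures stdout and stderr together, and diffs the result against the sibling test-<name>.out reference file; on a mismatch it writes a .err file that --retest and the interactive accept/rename step pick up later. The following is a rough, simplified sketch of that outer flow, not a drop-in replacement for run()/runone():

    # simplified driver sketch for a Python test such as the one above
    import os, subprocess, sys

    def drive(testpath):
        cmd = '%s "%s"' % (sys.executable, testpath)      # PYTHON + quoted test path
        p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        p.stdin.close()
        out = p.stdout.read()
        ret = p.wait()
        ref = testpath + '.out'                           # expected output, if any
        expected = os.path.exists(ref) and open(ref).read() or ''
        if ret == 0 and out == expected:
            return True                                   # shows up as '.' in the progress line
        err = open(testpath + '.err', 'wb')               # kept for diagnosis and --retest
        err.write(out)
        err.close()
        return False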