pylint, pyflakes: remove unused or duplicate imports
Nicolas Dumazet
r10905:13a1b2fb default
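The hunks below are mechanical cleanups: each one drops an import that pyflakes or pylint flagged as unused or duplicated. As a minimal illustration of the kind of report that motivates such a change (the file name is made up and the message is paraphrased, not taken from this commit), pyflakes complains about an import that is never referenced:

# demo.py -- "pyflakes demo.py" would report something like:
#   demo.py:4: 'sys' imported but unused
# The fix is simply to drop 'sys' from the import line.
import sys, re, glob

def list_python_files():
    # only re and glob are actually used, so 'sys' is dead weight
    return [f for f in glob.glob('*') if re.search(r'\.py$', f)]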
@@ -1,225 +1,225 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # check-code - a style and portability checker for Mercurial
4 4 #
5 5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 import sys, re, glob
10 import re, glob
11 11 import optparse
12 12
13 13 def repquote(m):
14 14 t = re.sub(r"\w", "x", m.group('text'))
15 15 t = re.sub(r"[^\sx]", "o", t)
16 16 return m.group('quote') + t + m.group('quote')
17 17
18 18 def reppython(m):
19 19 comment = m.group('comment')
20 20 if comment:
21 21 return "#" * len(comment)
22 22 return repquote(m)
23 23
24 24 def repcomment(m):
25 25 return m.group(1) + "#" * len(m.group(2))
26 26
27 27 def repccomment(m):
28 28 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
29 29 return m.group(1) + t + "*/"
30 30
31 31 def repcallspaces(m):
32 32 t = re.sub(r"\n\s+", "\n", m.group(2))
33 33 return m.group(1) + t
34 34
35 35 def repinclude(m):
36 36 return m.group(1) + "<foo>"
37 37
38 38 def rephere(m):
39 39 t = re.sub(r"\S", "x", m.group(2))
40 40 return m.group(1) + t
41 41
42 42
43 43 testpats = [
44 44 (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
45 45 (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
46 46 (r'^function', "don't use 'function', use old style"),
47 47 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
48 48 (r'echo.*\\n', "don't use 'echo \\n', use printf"),
49 49 (r'^diff.*-\w*N', "don't use 'diff -N'"),
50 50 (r'(^| )wc[^|]*$', "filter wc output"),
51 51 (r'head -c', "don't use 'head -c', use 'dd'"),
52 52 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
53 53 (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
54 54 (r'printf.*\\x', "don't use printf \\x, use Python"),
55 55 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
56 56 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
57 57 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
58 58 "use egrep for extended grep syntax"),
59 59 (r'/bin/', "don't use explicit paths for tools"),
60 60 (r'\$PWD', "don't use $PWD, use `pwd`"),
61 61 (r'[^\n]\Z', "no trailing newline"),
62 62 (r'export.*=', "don't export and assign at once"),
63 63 ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
64 64 ]
65 65
66 66 testfilters = [
67 67 (r"( *)(#([^\n]*\S)?)", repcomment),
68 68 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
69 69 ]
70 70
71 71 pypats = [
72 72 (r'^\s*\t', "don't use tabs"),
73 73 (r'\S;\s*\n', "semicolon"),
74 74 (r'\w,\w', "missing whitespace after ,"),
75 75 (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
76 76 (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
77 77 (r'.{85}', "line too long"),
78 78 (r'[^\n]\Z', "no trailing newline"),
79 79 # (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
80 80 # (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
81 81 (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
82 82 "linebreak after :"),
83 83 (r'class\s[^(]:', "old-style class, use class foo(object)"),
84 84 (r'^\s+del\(', "del isn't a function"),
85 85 (r'^\s+except\(', "except isn't a function"),
86 86 (r',]', "unneeded trailing ',' in list"),
87 87 # (r'class\s[A-Z][^\(]*\((?!Exception)',
88 88 # "don't capitalize non-exception classes"),
89 89 # (r'in range\(', "use xrange"),
90 90 # (r'^\s*print\s+', "avoid using print in core and extensions"),
91 91 (r'[\x80-\xff]', "non-ASCII character literal"),
92 92 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
93 93 (r'^\s*with\s+', "with not available in Python 2.4"),
94 94 (r'^\s*(any|all)\(', "any/all not available in Python 2.4"),
95 95 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
96 96 (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
97 97 # (r'\s\s=', "gratuitous whitespace before ="),
98 98 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
99 99 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s', "missing whitespace around operator"),
100 100 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S', "missing whitespace around operator"),
101 101 (r'[^+=*!<>&| -](\s=|=\s)[^= ]', "wrong whitespace around ="),
102 102 (r'raise Exception', "don't raise generic exceptions"),
103 103 (r'ui\.(status|progress|write|note)\([\'\"]x',
104 104 "warning: unwrapped ui message"),
105 105 ]
106 106
107 107 pyfilters = [
108 108 (r"""(?msx)(?P<comment>\#.*?$)|
109 109 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
110 110 (?P<text>(([^\\]|\\.)*?))
111 111 (?P=quote))""", reppython),
112 112 ]
113 113
114 114 cpats = [
115 115 (r'//', "don't use //-style comments"),
116 116 (r'^ ', "don't use spaces to indent"),
117 117 (r'\S\t', "don't use tabs except for indent"),
118 118 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
119 119 (r'.{85}', "line too long"),
120 120 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
121 121 (r'return\(', "return is not a function"),
122 122 (r' ;', "no space before ;"),
123 123 (r'\w+\* \w+', "use int *foo, not int* foo"),
124 124 (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
125 125 (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
126 126 (r'\w,\w', "missing whitespace after ,"),
127 127 (r'\w[+/*]\w', "missing whitespace in expression"),
128 128 (r'^#\s+\w', "use #foo, not # foo"),
129 129 (r'[^\n]\Z', "no trailing newline"),
130 130 ]
131 131
132 132 cfilters = [
133 133 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
134 134 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
135 135 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
136 136 (r'(\()([^)]+\))', repcallspaces),
137 137 ]
138 138
139 139 checks = [
140 140 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
141 141 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
142 142 ('c', r'.*\.c$', cfilters, cpats),
143 143 ]
144 144
145 145 class norepeatlogger(object):
146 146 def __init__(self):
147 147 self._lastseen = None
148 148
149 149 def log(self, fname, lineno, line, msg):
150 150 """print an error related to a given line of a given file.
151 151
152 152 The faulty line will also be printed but only once in the case
153 153 of multiple errors.
154 154
155 155 :fname: filename
156 156 :lineno: line number
157 157 :line: actual content of the line
158 158 :msg: error message
159 159 """
160 160 msgid = fname, lineno, line
161 161 if msgid != self._lastseen:
162 162 print "%s:%d:" % (fname, lineno)
163 163 print " > %s" % line
164 164 self._lastseen = msgid
165 165 print " " + msg
166 166
167 167 _defaultlogger = norepeatlogger()
168 168
169 169 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False):
170 170 """checks style and portability of a given file
171 171
172 172 :f: filepath
173 173 :logfunc: function used to report error
174 174 logfunc(filename, linenumber, linecontent, errormessage)
175 175 :maxerr: number of errors to display before aborting.
176 176 Set to None (default) to report all errors
177 177
178 178 return True if no error is found, False otherwise.
179 179 """
180 180 result = True
181 181 for name, match, filters, pats in checks:
182 182 fc = 0
183 183 if not re.match(match, f):
184 184 continue
185 185 pre = post = open(f).read()
186 186 if "no-" + "check-code" in pre:
187 187 break
188 188 for p, r in filters:
189 189 post = re.sub(p, r, post)
190 190 # print post # uncomment to show filtered version
191 191 z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
192 192 for n, l in z:
193 193 if "check-code" + "-ignore" in l[0]:
194 194 continue
195 195 for p, msg in pats:
196 196 if not warnings and msg.startswith("warning"):
197 197 continue
198 198 if re.search(p, l[1]):
199 199 logfunc(f, n + 1, l[0], msg)
200 200 fc += 1
201 201 result = False
202 202 if maxerr is not None and fc >= maxerr:
203 203 print " (too many errors, giving up)"
204 204 break
205 205 break
206 206 return result
207 207
208 208
209 209 if __name__ == "__main__":
210 210 parser = optparse.OptionParser("%prog [options] [files]")
211 211 parser.add_option("-w", "--warnings", action="store_true",
212 212 help="include warning-level checks")
213 213 parser.add_option("-p", "--per-file", type="int",
214 214 help="max warnings per file")
215 215
216 216 parser.set_defaults(per_file=15, warnings=False)
217 217 (options, args) = parser.parse_args()
218 218
219 219 if len(args) == 0:
220 220 check = glob.glob("*")
221 221 else:
222 222 check = args
223 223
224 224 for f in check:
225 225 checkfile(f, maxerr=options.per_file, warnings=options.warnings)
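Besides the command-line entry point above, checkfile() can be driven directly from Python. A minimal sketch, assuming the script has been saved under an importable name such as checkcode.py (the hyphen in check-code.py prevents a normal import) and using a made-up file list:

# Sketch only: run the checker over some files and collect the failures.
# 'checkcode' is an assumed module name, not part of the Mercurial tree.
import glob
import checkcode

failed = []
for fname in glob.glob('*.py'):
    # maxerr limits how many errors are reported per check;
    # warnings=True also applies the "warning:"-prefixed patterns
    if not checkcode.checkfile(fname, maxerr=5, warnings=True):
        failed.append(fname)

print "files with style problems:", failed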
@@ -1,1114 +1,1109 b''
1 1 #!/usr/bin/env python
2 2 # -*- coding: utf-8 -*-
3 3 # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
4 4 # Author: Engelbert Gruber <grubert@users.sourceforge.net>
5 5 # Copyright: This module is put into the public domain.
6 6
7 7 """
8 8 Simple man page writer for reStructuredText.
9 9
10 10 Man pages (short for "manual pages") contain system documentation on unix-like
11 11 systems. The pages are grouped in numbered sections:
12 12
13 13 1 executable programs and shell commands
14 14 2 system calls
15 15 3 library functions
16 16 4 special files
17 17 5 file formats
18 18 6 games
19 19 7 miscellaneous
20 20 8 system administration
21 21
22 22 Man pages are written in *troff*, a text file formatting system.
23 23
24 24 See http://www.tldp.org/HOWTO/Man-Page for a start.
25 25
26 26 Man pages have no subsections, only parts.
27 27 Standard parts
28 28
29 29 NAME ,
30 30 SYNOPSIS ,
31 31 DESCRIPTION ,
32 32 OPTIONS ,
33 33 FILES ,
34 34 SEE ALSO ,
35 35 BUGS ,
36 36
37 37 and
38 38
39 39 AUTHOR .
40 40
41 41 A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
42 42 by the command whatis or apropos.
43 43
44 44 """
45 45
46 46 __docformat__ = 'reStructuredText'
47 47
48 import sys
49 import os
50 import time
51 48 import re
52 from types import ListType
53 49
54 import docutils
55 from docutils import nodes, utils, writers, languages
50 from docutils import nodes, writers, languages
56 51 import roman
57 52
58 53 FIELD_LIST_INDENT = 7
59 54 DEFINITION_LIST_INDENT = 7
60 55 OPTION_LIST_INDENT = 7
61 56 BLOCKQOUTE_INDENT = 3.5
62 57
63 58 # Define two macros so man/roff can calculate the
64 59 # indent/unindent margins by itself
65 60 MACRO_DEF = (r""".
66 61 .nr rst2man-indent-level 0
67 62 .
68 63 .de1 rstReportMargin
69 64 \\$1 \\n[an-margin]
70 65 level \\n[rst2man-indent-level]
71 66 level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
72 67 -
73 68 \\n[rst2man-indent0]
74 69 \\n[rst2man-indent1]
75 70 \\n[rst2man-indent2]
76 71 ..
77 72 .de1 INDENT
78 73 .\" .rstReportMargin pre:
79 74 . RS \\$1
80 75 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
81 76 . nr rst2man-indent-level +1
82 77 .\" .rstReportMargin post:
83 78 ..
84 79 .de UNINDENT
85 80 . RE
86 81 .\" indent \\n[an-margin]
87 82 .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
88 83 .nr rst2man-indent-level -1
89 84 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
90 85 .in \\n[rst2man-indent\\n[rst2man-indent-level]]u
91 86 ..
92 87 """)
93 88
94 89 class Writer(writers.Writer):
95 90
96 91 supported = ('manpage')
97 92 """Formats this writer supports."""
98 93
99 94 output = None
100 95 """Final translated form of `document`."""
101 96
102 97 def __init__(self):
103 98 writers.Writer.__init__(self)
104 99 self.translator_class = Translator
105 100
106 101 def translate(self):
107 102 visitor = self.translator_class(self.document)
108 103 self.document.walkabout(visitor)
109 104 self.output = visitor.astext()
110 105
111 106
112 107 class Table:
113 108 def __init__(self):
114 109 self._rows = []
115 110 self._options = ['center']
116 111 self._tab_char = '\t'
117 112 self._coldefs = []
118 113 def new_row(self):
119 114 self._rows.append([])
120 115 def append_separator(self, separator):
121 116 """Append the separator for table head."""
122 117 self._rows.append([separator])
123 118 def append_cell(self, cell_lines):
124 119 """cell_lines is an array of lines"""
125 120 start = 0
126 121 if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
127 122 start = 1
128 123 self._rows[-1].append(cell_lines[start:])
129 124 if len(self._coldefs) < len(self._rows[-1]):
130 125 self._coldefs.append('l')
131 126 def _minimize_cell(self, cell_lines):
132 127 """Remove leading and trailing blank and ``.sp`` lines"""
133 128 while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
134 129 del cell_lines[0]
135 130 while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
136 131 del cell_lines[-1]
137 132 def as_list(self):
138 133 text = ['.TS\n']
139 134 text.append(' '.join(self._options) + ';\n')
140 135 text.append('|%s|.\n' % ('|'.join(self._coldefs)))
141 136 for row in self._rows:
142 137 # row = array of cells. cell = array of lines.
143 138 text.append('_\n') # line above
144 139 text.append('T{\n')
145 140 for i in range(len(row)):
146 141 cell = row[i]
147 142 self._minimize_cell(cell)
148 143 text.extend(cell)
149 144 if not text[-1].endswith('\n'):
150 145 text[-1] += '\n'
151 146 if i < len(row)-1:
152 147 text.append('T}'+self._tab_char+'T{\n')
153 148 else:
154 149 text.append('T}\n')
155 150 text.append('_\n')
156 151 text.append('.TE\n')
157 152 return text
158 153
159 154 class Translator(nodes.NodeVisitor):
160 155 """"""
161 156
162 157 words_and_spaces = re.compile(r'\S+| +|\n')
163 158 document_start = """Man page generated from reStructuredText."""
164 159
165 160 def __init__(self, document):
166 161 nodes.NodeVisitor.__init__(self, document)
167 162 self.settings = settings = document.settings
168 163 lcode = settings.language_code
169 164 self.language = languages.get_language(lcode)
170 165 self.head = []
171 166 self.body = []
172 167 self.foot = []
173 168 self.section_level = 0
174 169 self.context = []
175 170 self.topic_class = ''
176 171 self.colspecs = []
177 172 self.compact_p = 1
178 173 self.compact_simple = None
179 174 # the list style "*" bullet or "#" numbered
180 175 self._list_char = []
181 176 # writing the header .TH and .SH NAME is postponed after
182 177 # docinfo.
183 178 self._docinfo = {
184 179 "title" : "", "title_upper": "",
185 180 "subtitle" : "",
186 181 "manual_section" : "", "manual_group" : "",
187 182 "author" : [],
188 183 "date" : "",
189 184 "copyright" : "",
190 185 "version" : "",
191 186 }
192 187 self._docinfo_keys = [] # a list to keep the sequence as in source.
193 188 self._docinfo_names = {} # to get name from text not normalized.
194 189 self._in_docinfo = None
195 190 self._active_table = None
196 191 self._in_literal = False
197 192 self.header_written = 0
198 193 self._line_block = 0
199 194 self.authors = []
200 195 self.section_level = 0
201 196 self._indent = [0]
202 197 # central definition of simple processing rules
203 198 # what to output on : visit, depart
204 199 # Do not use paragraph requests ``.PP`` because these set indentation.
205 200 # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
206 201 #
207 202 # Fonts are put on a stack, the top one is used.
208 203 # ``.ft P`` or ``\\fP`` pop from stack.
209 204 # ``B`` bold, ``I`` italic, ``R`` roman should be available.
210 205 # Hopefully ``C`` courier too.
211 206 self.defs = {
212 207 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
213 208 'definition_list_item' : ('.TP', ''),
214 209 'field_name' : ('.TP\n.B ', '\n'),
215 210 'literal' : ('\\fB', '\\fP'),
216 211 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
217 212
218 213 'option_list_item' : ('.TP\n', ''),
219 214
220 215 'reference' : (r'\%', r'\:'),
221 216 'emphasis': ('\\fI', '\\fP'),
222 217 'strong' : ('\\fB', '\\fP'),
223 218 'term' : ('\n.B ', '\n'),
224 219 'title_reference' : ('\\fI', '\\fP'),
225 220
226 221 'topic-title' : ('.SS ',),
227 222 'sidebar-title' : ('.SS ',),
228 223
229 224 'problematic' : ('\n.nf\n', '\n.fi\n'),
230 225 }
231 226 # NOTE don't specify the newline before a dot-command, but ensure
232 227 # it is there.
233 228
234 229 def comment_begin(self, text):
235 230 """Return commented version of the passed text WITHOUT end of
236 231 line/comment."""
237 232 prefix = '.\\" '
238 233 out_text = ''.join(
239 234 [(prefix + in_line + '\n')
240 235 for in_line in text.split('\n')])
241 236 return out_text
242 237
243 238 def comment(self, text):
244 239 """Return commented version of the passed text."""
245 240 return self.comment_begin(text)+'.\n'
246 241
247 242 def ensure_eol(self):
248 243 """Ensure the last line in body is terminated by new line."""
249 244 if self.body[-1][-1] != '\n':
250 245 self.body.append('\n')
251 246
252 247 def astext(self):
253 248 """Return the final formatted document as a string."""
254 249 if not self.header_written:
255 250 # ensure we get a ".TH" as viewers require it.
256 251 self.head.append(self.header())
257 252 # filter body
258 253 for i in xrange(len(self.body)-1, 0, -1):
259 254 # remove superfluous vertical gaps.
260 255 if self.body[i] == '.sp\n':
261 256 if self.body[i - 1][:4] in ('.BI ','.IP '):
262 257 self.body[i] = '.\n'
263 258 elif (self.body[i - 1][:3] == '.B ' and
264 259 self.body[i - 2][:4] == '.TP\n'):
265 260 self.body[i] = '.\n'
266 261 elif (self.body[i - 1] == '\n' and
267 262 self.body[i - 2][0] != '.' and
268 263 (self.body[i - 3][:7] == '.TP\n.B '
269 264 or self.body[i - 3][:4] == '\n.B ')
270 265 ):
271 266 self.body[i] = '.\n'
272 267 return ''.join(self.head + self.body + self.foot)
273 268
274 269 def deunicode(self, text):
275 270 text = text.replace(u'\xa0', '\\ ')
276 271 text = text.replace(u'\u2020', '\\(dg')
277 272 return text
278 273
279 274 def visit_Text(self, node):
280 275 text = node.astext()
281 276 text = text.replace('\\','\\e')
282 277 replace_pairs = [
283 278 (u'-', ur'\-'),
284 279 (u'\'', ur'\(aq'),
285 280 (u'´', ur'\''),
286 281 (u'`', ur'\(ga'),
287 282 ]
288 283 for (in_char, out_markup) in replace_pairs:
289 284 text = text.replace(in_char, out_markup)
290 285 # unicode
291 286 text = self.deunicode(text)
292 287 if self._in_literal:
293 288 # prevent interpretation of "." at line start
294 289 if text[0] == '.':
295 290 text = '\\&' + text
296 291 text = text.replace('\n.', '\n\\&.')
297 292 self.body.append(text)
298 293
299 294 def depart_Text(self, node):
300 295 pass
301 296
302 297 def list_start(self, node):
303 298 class enum_char:
304 299 enum_style = {
305 300 'bullet' : '\\(bu',
306 301 'emdash' : '\\(em',
307 302 }
308 303
309 304 def __init__(self, style):
310 305 self._style = style
311 306 if node.has_key('start'):
312 307 self._cnt = node['start'] - 1
313 308 else:
314 309 self._cnt = 0
315 310 self._indent = 2
316 311 if style == 'arabic':
317 312 # indentation depends on number of children
318 313 # and start value.
319 314 self._indent = len(str(len(node.children)))
320 315 self._indent += len(str(self._cnt)) + 1
321 316 elif style == 'loweralpha':
322 317 self._cnt += ord('a') - 1
323 318 self._indent = 3
324 319 elif style == 'upperalpha':
325 320 self._cnt += ord('A') - 1
326 321 self._indent = 3
327 322 elif style.endswith('roman'):
328 323 self._indent = 5
329 324
330 325 def next(self):
331 326 if self._style == 'bullet':
332 327 return self.enum_style[self._style]
333 328 elif self._style == 'emdash':
334 329 return self.enum_style[self._style]
335 330 self._cnt += 1
336 331 # TODO add prefix postfix
337 332 if self._style == 'arabic':
338 333 return "%d." % self._cnt
339 334 elif self._style in ('loweralpha', 'upperalpha'):
340 335 return "%c." % self._cnt
341 336 elif self._style.endswith('roman'):
342 337 res = roman.toRoman(self._cnt) + '.'
343 338 if self._style.startswith('upper'):
344 339 return res.upper()
345 340 return res.lower()
346 341 else:
347 342 return "%d." % self._cnt
348 343 def get_width(self):
349 344 return self._indent
350 345 def __repr__(self):
351 346 return 'enum_style-%s' % list(self._style)
352 347
353 348 if node.has_key('enumtype'):
354 349 self._list_char.append(enum_char(node['enumtype']))
355 350 else:
356 351 self._list_char.append(enum_char('bullet'))
357 352 if len(self._list_char) > 1:
358 353 # indent nested lists
359 354 self.indent(self._list_char[-2].get_width())
360 355 else:
361 356 self.indent(self._list_char[-1].get_width())
362 357
363 358 def list_end(self):
364 359 self.dedent()
365 360 self._list_char.pop()
366 361
367 362 def header(self):
368 363 tmpl = (".TH %(title_upper)s %(manual_section)s"
369 364 " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
370 365 ".SH NAME\n"
371 366 "%(title)s \- %(subtitle)s\n")
372 367 return tmpl % self._docinfo
373 368
374 369 def append_header(self):
375 370 """append header with .TH and .SH NAME"""
376 371 # NOTE before everything
377 372 # .TH title_upper section date source manual
378 373 if self.header_written:
379 374 return
380 375 self.body.append(self.header())
381 376 self.body.append(MACRO_DEF)
382 377 self.header_written = 1
383 378
384 379 def visit_address(self, node):
385 380 self.visit_docinfo_item(node, 'address')
386 381
387 382 def depart_address(self, node):
388 383 pass
389 384
390 385 def visit_admonition(self, node, name=None):
391 386 if name:
392 387 self.body.append('.IP %s\n' %
393 388 self.language.labels.get(name, name))
394 389
395 390 def depart_admonition(self, node):
396 391 self.body.append('.RE\n')
397 392
398 393 def visit_attention(self, node):
399 394 self.visit_admonition(node, 'attention')
400 395
401 396 depart_attention = depart_admonition
402 397
403 398 def visit_docinfo_item(self, node, name):
404 399 if name == 'author':
405 400 self._docinfo[name].append(node.astext())
406 401 else:
407 402 self._docinfo[name] = node.astext()
408 403 self._docinfo_keys.append(name)
409 404 raise nodes.SkipNode
410 405
411 406 def depart_docinfo_item(self, node):
412 407 pass
413 408
414 409 def visit_author(self, node):
415 410 self.visit_docinfo_item(node, 'author')
416 411
417 412 depart_author = depart_docinfo_item
418 413
419 414 def visit_authors(self, node):
420 415 # _author is called anyway.
421 416 pass
422 417
423 418 def depart_authors(self, node):
424 419 pass
425 420
426 421 def visit_block_quote(self, node):
427 422 # BUG/HACK: indent always uses the _last_ indentation,
428 423 # thus we need two of them.
429 424 self.indent(BLOCKQOUTE_INDENT)
430 425 self.indent(0)
431 426
432 427 def depart_block_quote(self, node):
433 428 self.dedent()
434 429 self.dedent()
435 430
436 431 def visit_bullet_list(self, node):
437 432 self.list_start(node)
438 433
439 434 def depart_bullet_list(self, node):
440 435 self.list_end()
441 436
442 437 def visit_caption(self, node):
443 438 pass
444 439
445 440 def depart_caption(self, node):
446 441 pass
447 442
448 443 def visit_caution(self, node):
449 444 self.visit_admonition(node, 'caution')
450 445
451 446 depart_caution = depart_admonition
452 447
453 448 def visit_citation(self, node):
454 449 num, text = node.astext().split(None, 1)
455 450 num = num.strip()
456 451 self.body.append('.IP [%s] 5\n' % num)
457 452
458 453 def depart_citation(self, node):
459 454 pass
460 455
461 456 def visit_citation_reference(self, node):
462 457 self.body.append('['+node.astext()+']')
463 458 raise nodes.SkipNode
464 459
465 460 def visit_classifier(self, node):
466 461 pass
467 462
468 463 def depart_classifier(self, node):
469 464 pass
470 465
471 466 def visit_colspec(self, node):
472 467 self.colspecs.append(node)
473 468
474 469 def depart_colspec(self, node):
475 470 pass
476 471
477 472 def write_colspecs(self):
478 473 self.body.append("%s.\n" % ('L '*len(self.colspecs)))
479 474
480 475 def visit_comment(self, node,
481 476 sub=re.compile('-(?=-)').sub):
482 477 self.body.append(self.comment(node.astext()))
483 478 raise nodes.SkipNode
484 479
485 480 def visit_contact(self, node):
486 481 self.visit_docinfo_item(node, 'contact')
487 482
488 483 depart_contact = depart_docinfo_item
489 484
490 485 def visit_container(self, node):
491 486 pass
492 487
493 488 def depart_container(self, node):
494 489 pass
495 490
496 491 def visit_compound(self, node):
497 492 pass
498 493
499 494 def depart_compound(self, node):
500 495 pass
501 496
502 497 def visit_copyright(self, node):
503 498 self.visit_docinfo_item(node, 'copyright')
504 499
505 500 def visit_danger(self, node):
506 501 self.visit_admonition(node, 'danger')
507 502
508 503 depart_danger = depart_admonition
509 504
510 505 def visit_date(self, node):
511 506 self.visit_docinfo_item(node, 'date')
512 507
513 508 def visit_decoration(self, node):
514 509 pass
515 510
516 511 def depart_decoration(self, node):
517 512 pass
518 513
519 514 def visit_definition(self, node):
520 515 pass
521 516
522 517 def depart_definition(self, node):
523 518 pass
524 519
525 520 def visit_definition_list(self, node):
526 521 self.indent(DEFINITION_LIST_INDENT)
527 522
528 523 def depart_definition_list(self, node):
529 524 self.dedent()
530 525
531 526 def visit_definition_list_item(self, node):
532 527 self.body.append(self.defs['definition_list_item'][0])
533 528
534 529 def depart_definition_list_item(self, node):
535 530 self.body.append(self.defs['definition_list_item'][1])
536 531
537 532 def visit_description(self, node):
538 533 pass
539 534
540 535 def depart_description(self, node):
541 536 pass
542 537
543 538 def visit_docinfo(self, node):
544 539 self._in_docinfo = 1
545 540
546 541 def depart_docinfo(self, node):
547 542 self._in_docinfo = None
548 543 # NOTE nothing should be written before this
549 544 self.append_header()
550 545
551 546 def visit_doctest_block(self, node):
552 547 self.body.append(self.defs['literal_block'][0])
553 548 self._in_literal = True
554 549
555 550 def depart_doctest_block(self, node):
556 551 self._in_literal = False
557 552 self.body.append(self.defs['literal_block'][1])
558 553
559 554 def visit_document(self, node):
560 555 # no blank line between comment and header.
561 556 self.body.append(self.comment(self.document_start).rstrip()+'\n')
562 557 # writing header is postponed
563 558 self.header_written = 0
564 559
565 560 def depart_document(self, node):
566 561 if self._docinfo['author']:
567 562 self.body.append('.SH AUTHOR\n%s\n'
568 563 % ', '.join(self._docinfo['author']))
569 564 skip = ('author', 'copyright', 'date',
570 565 'manual_group', 'manual_section',
571 566 'subtitle',
572 567 'title', 'title_upper', 'version')
573 568 for name in self._docinfo_keys:
574 569 if name == 'address':
575 570 self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
576 571 self.language.labels.get(name, name),
577 572 self.defs['indent'][0] % 0,
578 573 self.defs['indent'][0] % BLOCKQOUTE_INDENT,
579 574 self._docinfo[name],
580 575 self.defs['indent'][1],
581 576 self.defs['indent'][1]))
582 577 elif not name in skip:
583 578 if name in self._docinfo_names:
584 579 label = self._docinfo_names[name]
585 580 else:
586 581 label = self.language.labels.get(name, name)
587 582 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
588 583 if self._docinfo['copyright']:
589 584 self.body.append('.SH COPYRIGHT\n%s\n'
590 585 % self._docinfo['copyright'])
591 586 self.body.append(self.comment(
592 587 'Generated by docutils manpage writer.\n'))
593 588
594 589 def visit_emphasis(self, node):
595 590 self.body.append(self.defs['emphasis'][0])
596 591
597 592 def depart_emphasis(self, node):
598 593 self.body.append(self.defs['emphasis'][1])
599 594
600 595 def visit_entry(self, node):
601 596 # a cell in a table row
602 597 if 'morerows' in node:
603 598 self.document.reporter.warning('"table row spanning" not supported',
604 599 base_node=node)
605 600 if 'morecols' in node:
606 601 self.document.reporter.warning(
607 602 '"table cell spanning" not supported', base_node=node)
608 603 self.context.append(len(self.body))
609 604
610 605 def depart_entry(self, node):
611 606 start = self.context.pop()
612 607 self._active_table.append_cell(self.body[start:])
613 608 del self.body[start:]
614 609
615 610 def visit_enumerated_list(self, node):
616 611 self.list_start(node)
617 612
618 613 def depart_enumerated_list(self, node):
619 614 self.list_end()
620 615
621 616 def visit_error(self, node):
622 617 self.visit_admonition(node, 'error')
623 618
624 619 depart_error = depart_admonition
625 620
626 621 def visit_field(self, node):
627 622 pass
628 623
629 624 def depart_field(self, node):
630 625 pass
631 626
632 627 def visit_field_body(self, node):
633 628 if self._in_docinfo:
634 629 name_normalized = self._field_name.lower().replace(" ","_")
635 630 self._docinfo_names[name_normalized] = self._field_name
636 631 self.visit_docinfo_item(node, name_normalized)
637 632 raise nodes.SkipNode
638 633
639 634 def depart_field_body(self, node):
640 635 pass
641 636
642 637 def visit_field_list(self, node):
643 638 self.indent(FIELD_LIST_INDENT)
644 639
645 640 def depart_field_list(self, node):
646 641 self.dedent()
647 642
648 643 def visit_field_name(self, node):
649 644 if self._in_docinfo:
650 645 self._field_name = node.astext()
651 646 raise nodes.SkipNode
652 647 else:
653 648 self.body.append(self.defs['field_name'][0])
654 649
655 650 def depart_field_name(self, node):
656 651 self.body.append(self.defs['field_name'][1])
657 652
658 653 def visit_figure(self, node):
659 654 self.indent(2.5)
660 655 self.indent(0)
661 656
662 657 def depart_figure(self, node):
663 658 self.dedent()
664 659 self.dedent()
665 660
666 661 def visit_footer(self, node):
667 662 self.document.reporter.warning('"footer" not supported',
668 663 base_node=node)
669 664
670 665 def depart_footer(self, node):
671 666 pass
672 667
673 668 def visit_footnote(self, node):
674 669 num, text = node.astext().split(None, 1)
675 670 num = num.strip()
676 671 self.body.append('.IP [%s] 5\n' % self.deunicode(num))
677 672
678 673 def depart_footnote(self, node):
679 674 pass
680 675
681 676 def footnote_backrefs(self, node):
682 677 self.document.reporter.warning('"footnote_backrefs" not supported',
683 678 base_node=node)
684 679
685 680 def visit_footnote_reference(self, node):
686 681 self.body.append('['+self.deunicode(node.astext())+']')
687 682 raise nodes.SkipNode
688 683
689 684 def depart_footnote_reference(self, node):
690 685 pass
691 686
692 687 def visit_generated(self, node):
693 688 pass
694 689
695 690 def depart_generated(self, node):
696 691 pass
697 692
698 693 def visit_header(self, node):
699 694 raise NotImplementedError, node.astext()
700 695
701 696 def depart_header(self, node):
702 697 pass
703 698
704 699 def visit_hint(self, node):
705 700 self.visit_admonition(node, 'hint')
706 701
707 702 depart_hint = depart_admonition
708 703
709 704 def visit_subscript(self, node):
710 705 self.body.append('\\s-2\\d')
711 706
712 707 def depart_subscript(self, node):
713 708 self.body.append('\\u\\s0')
714 709
715 710 def visit_superscript(self, node):
716 711 self.body.append('\\s-2\\u')
717 712
718 713 def depart_superscript(self, node):
719 714 self.body.append('\\d\\s0')
720 715
721 716 def visit_attribution(self, node):
722 717 self.body.append('\\(em ')
723 718
724 719 def depart_attribution(self, node):
725 720 self.body.append('\n')
726 721
727 722 def visit_image(self, node):
728 723 self.document.reporter.warning('"image" not supported',
729 724 base_node=node)
730 725 text = []
731 726 if 'alt' in node.attributes:
732 727 text.append(node.attributes['alt'])
733 728 if 'uri' in node.attributes:
734 729 text.append(node.attributes['uri'])
735 730 self.body.append('[image: %s]\n' % ('/'.join(text)))
736 731 raise nodes.SkipNode
737 732
738 733 def visit_important(self, node):
739 734 self.visit_admonition(node, 'important')
740 735
741 736 depart_important = depart_admonition
742 737
743 738 def visit_label(self, node):
744 739 # footnote and citation
745 740 if (isinstance(node.parent, nodes.footnote)
746 741 or isinstance(node.parent, nodes.citation)):
747 742 raise nodes.SkipNode
748 743 self.document.reporter.warning('"unsupported "label"',
749 744 base_node=node)
750 745 self.body.append('[')
751 746
752 747 def depart_label(self, node):
753 748 self.body.append(']\n')
754 749
755 750 def visit_legend(self, node):
756 751 pass
757 752
758 753 def depart_legend(self, node):
759 754 pass
760 755
761 756 # WHAT should we use .INDENT, .UNINDENT ?
762 757 def visit_line_block(self, node):
763 758 self._line_block += 1
764 759 if self._line_block == 1:
765 760 self.body.append('.sp\n')
766 761 self.body.append('.nf\n')
767 762 else:
768 763 self.body.append('.in +2\n')
769 764
770 765 def depart_line_block(self, node):
771 766 self._line_block -= 1
772 767 if self._line_block == 0:
773 768 self.body.append('.fi\n')
774 769 self.body.append('.sp\n')
775 770 else:
776 771 self.body.append('.in -2\n')
777 772
778 773 def visit_line(self, node):
779 774 pass
780 775
781 776 def depart_line(self, node):
782 777 self.body.append('\n')
783 778
784 779 def visit_list_item(self, node):
785 780 # man 7 man argues to use ".IP" instead of ".TP"
786 781 self.body.append('.IP %s %d\n' % (
787 782 self._list_char[-1].next(),
788 783 self._list_char[-1].get_width(),))
789 784
790 785 def depart_list_item(self, node):
791 786 pass
792 787
793 788 def visit_literal(self, node):
794 789 self.body.append(self.defs['literal'][0])
795 790
796 791 def depart_literal(self, node):
797 792 self.body.append(self.defs['literal'][1])
798 793
799 794 def visit_literal_block(self, node):
800 795 self.body.append(self.defs['literal_block'][0])
801 796 self._in_literal = True
802 797
803 798 def depart_literal_block(self, node):
804 799 self._in_literal = False
805 800 self.body.append(self.defs['literal_block'][1])
806 801
807 802 def visit_meta(self, node):
808 803 raise NotImplementedError, node.astext()
809 804
810 805 def depart_meta(self, node):
811 806 pass
812 807
813 808 def visit_note(self, node):
814 809 self.visit_admonition(node, 'note')
815 810
816 811 depart_note = depart_admonition
817 812
818 813 def indent(self, by=0.5):
819 814 # if we are in a section ".SH" there already is a .RS
820 815 step = self._indent[-1]
821 816 self._indent.append(by)
822 817 self.body.append(self.defs['indent'][0] % step)
823 818
824 819 def dedent(self):
825 820 self._indent.pop()
826 821 self.body.append(self.defs['indent'][1])
827 822
828 823 def visit_option_list(self, node):
829 824 self.indent(OPTION_LIST_INDENT)
830 825
831 826 def depart_option_list(self, node):
832 827 self.dedent()
833 828
834 829 def visit_option_list_item(self, node):
835 830 # one item of the list
836 831 self.body.append(self.defs['option_list_item'][0])
837 832
838 833 def depart_option_list_item(self, node):
839 834 self.body.append(self.defs['option_list_item'][1])
840 835
841 836 def visit_option_group(self, node):
842 837 # as one option could have several forms it is a group
843 838 # options without parameter bold only, .B, -v
844 839 # options with parameter bold italic, .BI, -f file
845 840 #
846 841 # we do not know if .B or .BI
847 842 self.context.append('.B') # blind guess
848 843 self.context.append(len(self.body)) # to be able to insert later
849 844 self.context.append(0) # option counter
850 845
851 846 def depart_option_group(self, node):
852 847 self.context.pop() # the counter
853 848 start_position = self.context.pop()
854 849 text = self.body[start_position:]
855 850 del self.body[start_position:]
856 851 self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
857 852
858 853 def visit_option(self, node):
859 854 # each form of the option will be presented separately
860 855 if self.context[-1] > 0:
861 856 self.body.append(', ')
862 857 if self.context[-3] == '.BI':
863 858 self.body.append('\\')
864 859 self.body.append(' ')
865 860
866 861 def depart_option(self, node):
867 862 self.context[-1] += 1
868 863
869 864 def visit_option_string(self, node):
870 865 # do not know if .B or .BI
871 866 pass
872 867
873 868 def depart_option_string(self, node):
874 869 pass
875 870
876 871 def visit_option_argument(self, node):
877 872 self.context[-3] = '.BI' # bold/italic alternate
878 873 if node['delimiter'] != ' ':
879 874 self.body.append('\\fB%s ' % node['delimiter'])
880 875 elif self.body[len(self.body)-1].endswith('='):
881 876 # a blank only means no blank in output, just changing font
882 877 self.body.append(' ')
883 878 else:
884 879 # blank backslash blank, switch font then a blank
885 880 self.body.append(' \\ ')
886 881
887 882 def depart_option_argument(self, node):
888 883 pass
889 884
890 885 def visit_organization(self, node):
891 886 self.visit_docinfo_item(node, 'organization')
892 887
893 888 def depart_organization(self, node):
894 889 pass
895 890
896 891 def visit_paragraph(self, node):
897 892 # ``.PP`` : Start standard indented paragraph.
898 893 # ``.LP`` : Start block paragraph, all except the first.
899 894 # ``.P [type]`` : Start paragraph type.
900 895 # NOTE don't use paragraph starts because they reset indentation.
901 896 # ``.sp`` is only vertical space
902 897 self.ensure_eol()
903 898 self.body.append('.sp\n')
904 899
905 900 def depart_paragraph(self, node):
906 901 self.body.append('\n')
907 902
908 903 def visit_problematic(self, node):
909 904 self.body.append(self.defs['problematic'][0])
910 905
911 906 def depart_problematic(self, node):
912 907 self.body.append(self.defs['problematic'][1])
913 908
914 909 def visit_raw(self, node):
915 910 if node.get('format') == 'manpage':
916 911 self.body.append(node.astext() + "\n")
917 912 # Keep non-manpage raw text out of output:
918 913 raise nodes.SkipNode
919 914
920 915 def visit_reference(self, node):
921 916 """E.g. link or email address."""
922 917 self.body.append(self.defs['reference'][0])
923 918
924 919 def depart_reference(self, node):
925 920 self.body.append(self.defs['reference'][1])
926 921
927 922 def visit_revision(self, node):
928 923 self.visit_docinfo_item(node, 'revision')
929 924
930 925 depart_revision = depart_docinfo_item
931 926
932 927 def visit_row(self, node):
933 928 self._active_table.new_row()
934 929
935 930 def depart_row(self, node):
936 931 pass
937 932
938 933 def visit_section(self, node):
939 934 self.section_level += 1
940 935
941 936 def depart_section(self, node):
942 937 self.section_level -= 1
943 938
944 939 def visit_status(self, node):
945 940 self.visit_docinfo_item(node, 'status')
946 941
947 942 depart_status = depart_docinfo_item
948 943
949 944 def visit_strong(self, node):
950 945 self.body.append(self.defs['strong'][0])
951 946
952 947 def depart_strong(self, node):
953 948 self.body.append(self.defs['strong'][1])
954 949
955 950 def visit_substitution_definition(self, node):
956 951 """Internal only."""
957 952 raise nodes.SkipNode
958 953
959 954 def visit_substitution_reference(self, node):
960 955 self.document.reporter.warning('"substitution_reference" not supported',
961 956 base_node=node)
962 957
963 958 def visit_subtitle(self, node):
964 959 if isinstance(node.parent, nodes.sidebar):
965 960 self.body.append(self.defs['strong'][0])
966 961 elif isinstance(node.parent, nodes.document):
967 962 self.visit_docinfo_item(node, 'subtitle')
968 963 elif isinstance(node.parent, nodes.section):
969 964 self.body.append(self.defs['strong'][0])
970 965
971 966 def depart_subtitle(self, node):
972 967 # document subtitle calls SkipNode
973 968 self.body.append(self.defs['strong'][1]+'\n.PP\n')
974 969
975 970 def visit_system_message(self, node):
976 971 # TODO add report_level
977 972 #if node['level'] < self.document.reporter['writer'].report_level:
978 973 # Level is too low to display:
979 974 # raise nodes.SkipNode
980 975 attr = {}
981 976 backref_text = ''
982 977 if node.hasattr('id'):
983 978 attr['name'] = node['id']
984 979 if node.hasattr('line'):
985 980 line = ', line %s' % node['line']
986 981 else:
987 982 line = ''
988 983 self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
989 984 % (node['type'], node['level'], node['source'], line))
990 985
991 986 def depart_system_message(self, node):
992 987 pass
993 988
994 989 def visit_table(self, node):
995 990 self._active_table = Table()
996 991
997 992 def depart_table(self, node):
998 993 self.ensure_eol()
999 994 self.body.extend(self._active_table.as_list())
1000 995 self._active_table = None
1001 996
1002 997 def visit_target(self, node):
1003 998 # targets are in-document hyper targets, without any use for man-pages.
1004 999 raise nodes.SkipNode
1005 1000
1006 1001 def visit_tbody(self, node):
1007 1002 pass
1008 1003
1009 1004 def depart_tbody(self, node):
1010 1005 pass
1011 1006
1012 1007 def visit_term(self, node):
1013 1008 self.body.append(self.defs['term'][0])
1014 1009
1015 1010 def depart_term(self, node):
1016 1011 self.body.append(self.defs['term'][1])
1017 1012
1018 1013 def visit_tgroup(self, node):
1019 1014 pass
1020 1015
1021 1016 def depart_tgroup(self, node):
1022 1017 pass
1023 1018
1024 1019 def visit_thead(self, node):
1025 1020 # MAYBE double line '='
1026 1021 pass
1027 1022
1028 1023 def depart_thead(self, node):
1029 1024 # MAYBE double line '='
1030 1025 pass
1031 1026
1032 1027 def visit_tip(self, node):
1033 1028 self.visit_admonition(node, 'tip')
1034 1029
1035 1030 depart_tip = depart_admonition
1036 1031
1037 1032 def visit_title(self, node):
1038 1033 if isinstance(node.parent, nodes.topic):
1039 1034 self.body.append(self.defs['topic-title'][0])
1040 1035 elif isinstance(node.parent, nodes.sidebar):
1041 1036 self.body.append(self.defs['sidebar-title'][0])
1042 1037 elif isinstance(node.parent, nodes.admonition):
1043 1038 self.body.append('.IP "')
1044 1039 elif self.section_level == 0:
1045 1040 self._docinfo['title'] = node.astext()
1046 1041 # document title for .TH
1047 1042 self._docinfo['title_upper'] = node.astext().upper()
1048 1043 raise nodes.SkipNode
1049 1044 elif self.section_level == 1:
1050 1045 self.body.append('.SH ')
1051 1046 for n in node.traverse(nodes.Text):
1052 1047 n.parent.replace(n, nodes.Text(n.astext().upper()))
1053 1048 else:
1054 1049 self.body.append('.SS ')
1055 1050
1056 1051 def depart_title(self, node):
1057 1052 if isinstance(node.parent, nodes.admonition):
1058 1053 self.body.append('"')
1059 1054 self.body.append('\n')
1060 1055
1061 1056 def visit_title_reference(self, node):
1062 1057 """inline citation reference"""
1063 1058 self.body.append(self.defs['title_reference'][0])
1064 1059
1065 1060 def depart_title_reference(self, node):
1066 1061 self.body.append(self.defs['title_reference'][1])
1067 1062
1068 1063 def visit_topic(self, node):
1069 1064 pass
1070 1065
1071 1066 def depart_topic(self, node):
1072 1067 pass
1073 1068
1074 1069 def visit_sidebar(self, node):
1075 1070 pass
1076 1071
1077 1072 def depart_sidebar(self, node):
1078 1073 pass
1079 1074
1080 1075 def visit_rubric(self, node):
1081 1076 pass
1082 1077
1083 1078 def depart_rubric(self, node):
1084 1079 pass
1085 1080
1086 1081 def visit_transition(self, node):
1087 1082 # .PP Begin a new paragraph and reset prevailing indent.
1088 1083 # .sp N leaves N lines of blank space.
1089 1084 # .ce centers the next line
1090 1085 self.body.append('\n.sp\n.ce\n----\n')
1091 1086
1092 1087 def depart_transition(self, node):
1093 1088 self.body.append('\n.ce 0\n.sp\n')
1094 1089
1095 1090 def visit_version(self, node):
1096 1091 self.visit_docinfo_item(node, 'version')
1097 1092
1098 1093 def visit_warning(self, node):
1099 1094 self.visit_admonition(node, 'warning')
1100 1095
1101 1096 depart_warning = depart_admonition
1102 1097
1103 1098 def unimplemented_visit(self, node):
1104 1099 raise NotImplementedError('visiting unimplemented node type: %s'
1105 1100 % node.__class__.__name__)
1106 1101
1107 1102 # The following part is taken from the Docutils rst2man.py script:
1108 1103 if __name__ == "__main__":
1109 1104 from docutils.core import publish_cmdline, default_description
1110 1105 description = ("Generates plain unix manual documents. " +
1111 1106 default_description)
1112 1107 publish_cmdline(writer=Writer(), description=description)
1113 1108
1114 1109 # vim: set fileencoding=utf-8 et ts=4 ai :
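The writer above is normally driven through the rst2man-style command line at the end of the file, but docutils can also use it in-process. A minimal sketch, assuming the module is importable as manpage (with its docutils and roman dependencies available) and using a made-up reStructuredText snippet:

# Sketch only: render a small reStructuredText document to troff man-page
# source with the Writer defined above. Input text and module name are
# assumptions for illustration.
from docutils.core import publish_string
from manpage import Writer

rst = """\
hg-example
==========

an illustrative one-line description for the NAME section

Description
-----------
Ordinary paragraphs become body text; the section title above is
uppercased into an ``.SH`` heading by the translator.
"""

troff = publish_string(source=rst, writer=Writer())
print troff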
@@ -1,188 +1,188 b''
1 1 # churn.py - create a graph of revisions count grouped by template
2 2 #
3 3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''command to display statistics about repository history'''
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial import patch, cmdutil, util, templater
13 import sys, os
13 import os
14 14 import time, datetime
15 15
16 16 def maketemplater(ui, repo, tmpl):
17 17 tmpl = templater.parsestring(tmpl, quoted=False)
18 18 try:
19 19 t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
20 20 except SyntaxError, inst:
21 21 raise util.Abort(inst.args[0])
22 22 t.use_template(tmpl)
23 23 return t
24 24
25 25 def changedlines(ui, repo, ctx1, ctx2, fns):
26 26 added, removed = 0, 0
27 27 fmatch = cmdutil.matchfiles(repo, fns)
28 28 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
29 29 for l in diff.split('\n'):
30 30 if l.startswith("+") and not l.startswith("+++ "):
31 31 added += 1
32 32 elif l.startswith("-") and not l.startswith("--- "):
33 33 removed += 1
34 34 return (added, removed)
35 35
36 36 def countrate(ui, repo, amap, *pats, **opts):
37 37 """Calculate stats"""
38 38 if opts.get('dateformat'):
39 39 def getkey(ctx):
40 40 t, tz = ctx.date()
41 41 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
42 42 return date.strftime(opts['dateformat'])
43 43 else:
44 44 tmpl = opts.get('template', '{author|email}')
45 45 tmpl = maketemplater(ui, repo, tmpl)
46 46 def getkey(ctx):
47 47 ui.pushbuffer()
48 48 tmpl.show(ctx)
49 49 return ui.popbuffer()
50 50
51 51 state = {'count': 0}
52 52 rate = {}
53 53 df = False
54 54 if opts.get('date'):
55 55 df = util.matchdate(opts['date'])
56 56
57 57 m = cmdutil.match(repo, pats, opts)
58 58 def prep(ctx, fns):
59 59 rev = ctx.rev()
60 60 if df and not df(ctx.date()[0]): # doesn't match date format
61 61 return
62 62
63 63 key = getkey(ctx)
64 64 key = amap.get(key, key) # alias remap
65 65 if opts.get('changesets'):
66 66 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
67 67 else:
68 68 parents = ctx.parents()
69 69 if len(parents) > 1:
70 70 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
71 71 return
72 72
73 73 ctx1 = parents[0]
74 74 lines = changedlines(ui, repo, ctx1, ctx, fns)
75 75 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
76 76
77 77 state['count'] += 1
78 78 ui.progress(_('analyzing'), state['count'], total=len(repo))
79 79
80 80 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
81 81 continue
82 82
83 83 ui.progress(_('analyzing'), None)
84 84
85 85 return rate
86 86
87 87
88 88 def churn(ui, repo, *pats, **opts):
89 89 '''histogram of changes to the repository
90 90
91 91 This command will display a histogram representing the number
92 92 of changed lines or revisions, grouped according to the given
93 93 template. The default template will group changes by author.
94 94 The --dateformat option may be used to group the results by
95 95 date instead.
96 96
97 97 Statistics are based on the number of changed lines, or
98 98 alternatively the number of matching revisions if the
99 99 --changesets option is specified.
100 100
101 101 Examples::
102 102
103 103 # display count of changed lines for every committer
104 104 hg churn -t '{author|email}'
105 105
106 106 # display daily activity graph
107 107 hg churn -f '%H' -s -c
108 108
109 109 # display activity of developers by month
110 110 hg churn -f '%Y-%m' -s -c
111 111
112 112 # display count of lines changed in every year
113 113 hg churn -f '%Y' -s
114 114
115 115 It is possible to map alternate email addresses to a main address
116 116 by providing a file using the following format::
117 117
118 118 <alias email> <actual email>
119 119
120 120 Such a file may be specified with the --aliases option, otherwise
121 121 a .hgchurn file will be looked for in the working directory root.
122 122 '''
123 123 def pad(s, l):
124 124 return (s + " " * l)[:l]
125 125
126 126 amap = {}
127 127 aliases = opts.get('aliases')
128 128 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
129 129 aliases = repo.wjoin('.hgchurn')
130 130 if aliases:
131 131 for l in open(aliases, "r"):
132 132 l = l.strip()
133 133 alias, actual = l.split()
134 134 amap[alias] = actual
135 135
136 136 rate = countrate(ui, repo, amap, *pats, **opts).items()
137 137 if not rate:
138 138 return
139 139
140 140 sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
141 141 rate.sort(key=sortkey)
142 142
143 143 # Be careful not to have a zero maxcount (issue833)
144 144 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
145 145 maxname = max(len(k) for k, v in rate)
146 146
147 147 ttywidth = util.termwidth()
148 148 ui.debug("assuming %i character terminal\n" % ttywidth)
149 149 width = ttywidth - maxname - 2 - 2 - 2
150 150
151 151 if opts.get('diffstat'):
152 152 width -= 15
153 153 def format(name, (added, removed)):
154 154 return "%s %15s %s%s\n" % (pad(name, maxname),
155 155 '+%d/-%d' % (added, removed),
156 156 ui.label('+' * charnum(added),
157 157 'diffstat.inserted'),
158 158 ui.label('-' * charnum(removed),
159 159 'diffstat.deleted'))
160 160 else:
161 161 width -= 6
162 162 def format(name, count):
163 163 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
164 164 '*' * charnum(sum(count)))
165 165
166 166 def charnum(count):
167 167 return int(round(count * width / maxcount))
168 168
169 169 for name, count in rate:
170 170 ui.write(format(name, count))
171 171
172 172
173 173 cmdtable = {
174 174 "churn":
175 175 (churn,
176 176 [('r', 'rev', [], _('count rate for the specified revision or range')),
177 177 ('d', 'date', '', _('count rate for revisions matching date spec')),
178 178 ('t', 'template', '{author|email}',
179 179 _('template to group changesets')),
180 180 ('f', 'dateformat', '',
181 181 _('strftime-compatible format for grouping by date')),
182 182 ('c', 'changesets', False, _('count rate by number of changesets')),
183 183 ('s', 'sort', False, _('sort by key (default: sort by count)')),
184 184 ('', 'diffstat', False, _('display added/removed lines separately')),
185 185 ('', 'aliases', '', _('file with email aliases')),
186 186 ],
187 187 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]")),
188 188 }
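The --aliases handling in churn() above is just a whitespace-split mapping from alternate addresses to a canonical one, looked up with a plain dict fallback. A minimal standalone sketch of the same remapping (the file name comes from the extension, the address is illustrative, and blank lines are skipped here even though the extension itself does not bother):

# Sketch of churn's alias remapping: each line of the aliases file is
# "<alias email> <actual email>".
amap = {}
for line in open('.hgchurn', 'r'):
    line = line.strip()
    if not line:
        continue
    alias, actual = line.split()
    amap[alias] = actual

# a changeset keyed by an alias address is counted under the actual one
key = 'jeff@josefsipek.net'
print amap.get(key, key)   # falls back to the key itself when unmapped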
@@ -1,471 +1,466 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21 21
22 22 import os
23 23 import stat
24 24 import urllib
25 25 import urllib2
26 26 import email.Utils
27 27
28 try:
29 from cStringIO import StringIO
30 except ImportError, msg:
31 from StringIO import StringIO
32
33 28 class RangeError(IOError):
34 29 """Error raised when an unsatisfiable range is requested."""
35 30 pass
36 31
37 32 class HTTPRangeHandler(urllib2.BaseHandler):
38 33 """Handler that enables HTTP Range headers.
39 34
40 35 This was extremely simple. The Range header is a HTTP feature to
41 36 begin with so all this class does is tell urllib2 that the
42 37 "206 Partial Content" response from the HTTP server is what we
43 38 expected.
44 39
45 40 Example:
46 41 import urllib2
47 42 import byterange
48 43
49 44 range_handler = byterange.HTTPRangeHandler()
50 45 opener = urllib2.build_opener(range_handler)
51 46
52 47 # install it
53 48 urllib2.install_opener(opener)
54 49
55 50 # create Request and set Range header
56 51 req = urllib2.Request('http://www.python.org/')
57 52 req.add_header('Range', 'bytes=30-50')
58 53 f = urllib2.urlopen(req)
59 54 """
60 55
61 56 def http_error_206(self, req, fp, code, msg, hdrs):
62 57 # 206 Partial Content Response
63 58 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 59 r.code = code
65 60 r.msg = msg
66 61 return r
67 62
68 63 def http_error_416(self, req, fp, code, msg, hdrs):
69 64 # HTTP's Range Not Satisfiable error
70 65 raise RangeError('Requested Range Not Satisfiable')
71 66
72 67 class RangeableFileObject:
73 68 """File object wrapper to enable raw range handling.
74 69 This was implemented primarily for handling range
75 70 specifications for file:// urls. This object effectively makes
76 71 a file object look like it consists only of a range of bytes in
77 72 the stream.
78 73
79 74 Examples:
80 75 # expose 10 bytes, starting at byte position 20, from
81 76 # /etc/aliases.
82 77 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 78 # seek seeks within the range (to position 23 in this case)
84 79 >>> fo.seek(3)
85 80 # tell tells where your at _within the range_ (position 3 in
86 81 # this case)
87 82 >>> fo.tell()
88 83 # read EOFs if an attempt is made to read past the last
89 84 # byte in the range. the following will return only 7 bytes.
90 85 >>> fo.read(30)
91 86 """
92 87
93 88 def __init__(self, fo, rangetup):
94 89 """Create a RangeableFileObject.
95 90 fo -- a file like object. only the read() method need be
96 91 supported but supporting an optimized seek() is
97 92 preferable.
98 93 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 94 to work over.
100 95 The file object provided is assumed to be at byte offset 0.
101 96 """
102 97 self.fo = fo
103 98 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 99 self.realpos = 0
105 100 self._do_seek(self.firstbyte)
106 101
107 102 def __getattr__(self, name):
108 103 """This effectively allows us to wrap at the instance level.
109 104 Any attribute not found in _this_ object will be searched for
110 105 in self.fo. This includes methods."""
111 106 if hasattr(self.fo, name):
112 107 return getattr(self.fo, name)
113 108 raise AttributeError(name)
114 109
115 110 def tell(self):
116 111 """Return the position within the range.
117 112 This is different from fo.seek in that position 0 is the
118 113 first byte position of the range tuple. For example, if
119 114 this object was created with a range tuple of (500,899),
120 115 tell() will return 0 when at byte position 500 of the file.
121 116 """
122 117 return (self.realpos - self.firstbyte)
123 118
124 119 def seek(self, offset, whence=0):
125 120 """Seek within the byte range.
126 121 Positioning is identical to that described under tell().
127 122 """
128 123 assert whence in (0, 1, 2)
129 124 if whence == 0: # absolute seek
130 125 realoffset = self.firstbyte + offset
131 126 elif whence == 1: # relative seek
132 127 realoffset = self.realpos + offset
133 128 elif whence == 2: # absolute from end of file
134 129 # XXX: are we raising the right Error here?
135 130 raise IOError('seek from end of file not supported.')
136 131
137 132 # do not allow seek past lastbyte in range
138 133 if self.lastbyte and (realoffset >= self.lastbyte):
139 134 realoffset = self.lastbyte
140 135
141 136 self._do_seek(realoffset - self.realpos)
142 137
143 138 def read(self, size=-1):
144 139 """Read within the range.
145 140 This method will limit the size read based on the range.
146 141 """
147 142 size = self._calc_read_size(size)
148 143 rslt = self.fo.read(size)
149 144 self.realpos += len(rslt)
150 145 return rslt
151 146
152 147 def readline(self, size=-1):
153 148 """Read lines within the range.
154 149 This method will limit the size read based on the range.
155 150 """
156 151 size = self._calc_read_size(size)
157 152 rslt = self.fo.readline(size)
158 153 self.realpos += len(rslt)
159 154 return rslt
160 155
161 156 def _calc_read_size(self, size):
162 157 """Handles calculating the amount of data to read based on
163 158 the range.
164 159 """
165 160 if self.lastbyte:
166 161 if size > -1:
167 162 if ((self.realpos + size) >= self.lastbyte):
168 163 size = (self.lastbyte - self.realpos)
169 164 else:
170 165 size = (self.lastbyte - self.realpos)
171 166 return size
172 167
173 168 def _do_seek(self, offset):
174 169 """Seek based on whether wrapped object supports seek().
175 170 offset is relative to the current position (self.realpos).
176 171 """
177 172 assert offset >= 0
178 173 if not hasattr(self.fo, 'seek'):
179 174 self._poor_mans_seek(offset)
180 175 else:
181 176 self.fo.seek(self.realpos + offset)
182 177 self.realpos += offset
183 178
184 179 def _poor_mans_seek(self, offset):
185 180 """Seek by calling the wrapped file objects read() method.
186 181 This is used for file like objects that do not have native
187 182 seek support. The wrapped objects read() method is called
188 183 to manually seek to the desired position.
189 184 offset -- read this number of bytes from the wrapped
190 185 file object.
191 186 raise RangeError if we encounter EOF before reaching the
192 187 specified offset.
193 188 """
194 189 pos = 0
195 190 bufsize = 1024
196 191 while pos < offset:
197 192 if (pos + bufsize) > offset:
198 193 bufsize = offset - pos
199 194 buf = self.fo.read(bufsize)
200 195 if len(buf) != bufsize:
201 196 raise RangeError('Requested Range Not Satisfiable')
202 197 pos += bufsize
203 198
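A minimal usage sketch of the wrapper above (not part of the patch); the in-memory StringIO data and the (5, 15) range are illustrative assumptions:

from StringIO import StringIO

raw = StringIO('0123456789abcdefghij')
fo = RangeableFileObject(raw, (5, 15))   # expose bytes 5..14 only
assert fo.tell() == 0                    # position 0 maps to byte 5 of raw
assert fo.read(4) == '5678'              # reads are confined to the range
assert fo.read() == '9abcde'             # EOF at the end of the range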
204 199 class FileRangeHandler(urllib2.FileHandler):
205 200 """FileHandler subclass that adds Range support.
206 201 This class handles Range headers exactly like an HTTP
207 202 server would.
208 203 """
209 204 def open_local_file(self, req):
210 205 import mimetypes
211 206 import email
212 207 host = req.get_host()
213 208 file = req.get_selector()
214 209 localfile = urllib.url2pathname(file)
215 210 stats = os.stat(localfile)
216 211 size = stats[stat.ST_SIZE]
217 212 modified = email.Utils.formatdate(stats[stat.ST_MTIME])
218 213 mtype = mimetypes.guess_type(file)[0]
219 214 if host:
220 215 host, port = urllib.splitport(host)
221 216 if port or socket.gethostbyname(host) not in self.get_names():
222 217 raise urllib2.URLError('file not on local host')
223 218 fo = open(localfile,'rb')
224 219 brange = req.headers.get('Range', None)
225 220 brange = range_header_to_tuple(brange)
226 221 assert brange != ()
227 222 if brange:
228 223 (fb, lb) = brange
229 224 if lb == '':
230 225 lb = size
231 226 if fb < 0 or fb > size or lb > size:
232 227 raise RangeError('Requested Range Not Satisfiable')
233 228 size = (lb - fb)
234 229 fo = RangeableFileObject(fo, (fb, lb))
235 230 headers = email.message_from_string(
236 231 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
237 232 (mtype or 'text/plain', size, modified))
238 233 return urllib.addinfourl(fo, headers, 'file:'+file)
239 234
240 235
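A hedged sketch of how a handler like this is typically wired into urllib2; the file path and byte range below are placeholders, not anything prescribed by the module:

import urllib2

opener = urllib2.build_opener(FileRangeHandler())
req = urllib2.Request('file:///etc/hosts')
req.add_header('Range', 'bytes=0-99')    # ask for the first 100 bytes
fo = opener.open(req)
partial = fo.read()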
241 236 # FTP Range Support
242 237 # Unfortunately, a large amount of base FTP code had to be copied
243 238 # from urllib and urllib2 in order to insert the FTP REST command.
244 239 # Code modifications for range support have been commented as
245 240 # follows:
246 241 # -- range support modifications start/end here
247 242
248 243 from urllib import splitport, splituser, splitpasswd, splitattr, \
249 244 unquote, addclosehook, addinfourl
250 245 import ftplib
251 246 import socket
252 247 import sys
253 248 import mimetypes
254 249 import email
255 250
256 251 class FTPRangeHandler(urllib2.FTPHandler):
257 252 def ftp_open(self, req):
258 253 host = req.get_host()
259 254 if not host:
260 255 raise IOError('ftp error', 'no host given')
261 256 host, port = splitport(host)
262 257 if port is None:
263 258 port = ftplib.FTP_PORT
264 259 else:
265 260 port = int(port)
266 261
267 262 # username/password handling
268 263 user, host = splituser(host)
269 264 if user:
270 265 user, passwd = splitpasswd(user)
271 266 else:
272 267 passwd = None
273 268 host = unquote(host)
274 269 user = unquote(user or '')
275 270 passwd = unquote(passwd or '')
276 271
277 272 try:
278 273 host = socket.gethostbyname(host)
279 274 except socket.error, msg:
280 275 raise urllib2.URLError(msg)
281 276 path, attrs = splitattr(req.get_selector())
282 277 dirs = path.split('/')
283 278 dirs = map(unquote, dirs)
284 279 dirs, file = dirs[:-1], dirs[-1]
285 280 if dirs and not dirs[0]:
286 281 dirs = dirs[1:]
287 282 try:
288 283 fw = self.connect_ftp(user, passwd, host, port, dirs)
289 284 type = file and 'I' or 'D'
290 285 for attr in attrs:
291 286 attr, value = splitattr(attr)
292 287 if attr.lower() == 'type' and \
293 288 value in ('a', 'A', 'i', 'I', 'd', 'D'):
294 289 type = value.upper()
295 290
296 291 # -- range support modifications start here
297 292 rest = None
298 293 range_tup = range_header_to_tuple(req.headers.get('Range', None))
299 294 assert range_tup != ()
300 295 if range_tup:
301 296 (fb, lb) = range_tup
302 297 if fb > 0:
303 298 rest = fb
304 299 # -- range support modifications end here
305 300
306 301 fp, retrlen = fw.retrfile(file, type, rest)
307 302
308 303 # -- range support modifications start here
309 304 if range_tup:
310 305 (fb, lb) = range_tup
311 306 if lb == '':
312 307 if retrlen is None or retrlen == 0:
313 308 raise RangeError('Requested Range Not Satisfiable due'
314 309 ' to unobtainable file length.')
315 310 lb = retrlen
316 311 retrlen = lb - fb
317 312 if retrlen < 0:
318 313 # beginning of range is larger than file
319 314 raise RangeError('Requested Range Not Satisfiable')
320 315 else:
321 316 retrlen = lb - fb
322 317 fp = RangeableFileObject(fp, (0, retrlen))
323 318 # -- range support modifications end here
324 319
325 320 headers = ""
326 321 mtype = mimetypes.guess_type(req.get_full_url())[0]
327 322 if mtype:
328 323 headers += "Content-Type: %s\n" % mtype
329 324 if retrlen is not None and retrlen >= 0:
330 325 headers += "Content-Length: %d\n" % retrlen
331 326 headers = email.message_from_string(headers)
332 327 return addinfourl(fp, headers, req.get_full_url())
333 328 except ftplib.all_errors, msg:
334 329 raise IOError('ftp error', msg), sys.exc_info()[2]
335 330
336 331 def connect_ftp(self, user, passwd, host, port, dirs):
337 332 fw = ftpwrapper(user, passwd, host, port, dirs)
338 333 return fw
339 334
340 335 class ftpwrapper(urllib.ftpwrapper):
341 336 # range support note:
342 337 # this ftpwrapper code is copied directly from
343 338 # urllib. The only enhancement is to add the rest
344 339 # argument and pass it on to ftp.ntransfercmd
345 340 def retrfile(self, file, type, rest=None):
346 341 self.endtransfer()
347 342 if type in ('d', 'D'):
348 343 cmd = 'TYPE A'
349 344 isdir = 1
350 345 else:
351 346 cmd = 'TYPE ' + type
352 347 isdir = 0
353 348 try:
354 349 self.ftp.voidcmd(cmd)
355 350 except ftplib.all_errors:
356 351 self.init()
357 352 self.ftp.voidcmd(cmd)
358 353 conn = None
359 354 if file and not isdir:
360 355 # Use nlst to see if the file exists at all
361 356 try:
362 357 self.ftp.nlst(file)
363 358 except ftplib.error_perm, reason:
364 359 raise IOError('ftp error', reason), sys.exc_info()[2]
365 360 # Restore the transfer mode!
366 361 self.ftp.voidcmd(cmd)
367 362 # Try to retrieve as a file
368 363 try:
369 364 cmd = 'RETR ' + file
370 365 conn = self.ftp.ntransfercmd(cmd, rest)
371 366 except ftplib.error_perm, reason:
372 367 if str(reason).startswith('501'):
373 368 # workaround for REST not supported error
374 369 fp, retrlen = self.retrfile(file, type)
375 370 fp = RangeableFileObject(fp, (rest,''))
376 371 return (fp, retrlen)
377 372 elif not str(reason).startswith('550'):
378 373 raise IOError('ftp error', reason), sys.exc_info()[2]
379 374 if not conn:
380 375 # Set transfer mode to ASCII!
381 376 self.ftp.voidcmd('TYPE A')
382 377 # Try a directory listing
383 378 if file:
384 379 cmd = 'LIST ' + file
385 380 else:
386 381 cmd = 'LIST'
387 382 conn = self.ftp.ntransfercmd(cmd)
388 383 self.busy = 1
389 384 # Pass back both a suitably decorated object and a retrieval length
390 385 return (addclosehook(conn[0].makefile('rb'),
391 386 self.endtransfer), conn[1])
392 387
393 388
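Similarly, a speculative example of resuming an FTP download through the handler above (the open-ended Range becomes a REST offset); host and path are placeholders:

import urllib2

opener = urllib2.build_opener(FTPRangeHandler())
req = urllib2.Request('ftp://ftp.example.com/pub/big.iso')
req.add_header('Range', 'bytes=1048576-')   # resume from the 1 MiB mark
fp = opener.open(req)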
394 389 ####################################################################
395 390 # Range Tuple Functions
396 391 # XXX: These range tuple functions might go better in a class.
397 392
398 393 _rangere = None
399 394 def range_header_to_tuple(range_header):
400 395 """Get a (firstbyte,lastbyte) tuple from a Range header value.
401 396
402 397 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
403 398 function pulls the firstbyte and lastbyte values and returns
404 399 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
405 400 the header value, it is returned as an empty string in the
406 401 tuple.
407 402
408 403 Return None if range_header is None
409 404 Return () if range_header does not conform to the range spec
410 405 pattern.
411 406
412 407 """
413 408 global _rangere
414 409 if range_header is None:
415 410 return None
416 411 if _rangere is None:
417 412 import re
418 413 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
419 414 match = _rangere.match(range_header)
420 415 if match:
421 416 tup = range_tuple_normalize(match.group(1, 2))
422 417 if tup and tup[1]:
423 418 tup = (tup[0], tup[1]+1)
424 419 return tup
425 420 return ()
426 421
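Illustrative calls showing the contract described in the docstring (note that the last byte is made exclusive on output):

assert range_header_to_tuple('bytes=500-899') == (500, 900)
assert range_header_to_tuple('bytes=500-') == (500, '')
assert range_header_to_tuple(None) is None
assert range_header_to_tuple('not-a-range') == ()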
427 422 def range_tuple_to_header(range_tup):
428 423 """Convert a range tuple to a Range header value.
429 424 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
430 425 if no range is needed.
431 426 """
432 427 if range_tup is None:
433 428 return None
434 429 range_tup = range_tuple_normalize(range_tup)
435 430 if range_tup:
436 431 if range_tup[1]:
437 432 range_tup = (range_tup[0], range_tup[1] - 1)
438 433 return 'bytes=%s-%s' % range_tup
439 434
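And the inverse direction, again purely illustrative:

assert range_tuple_to_header((500, 900)) == 'bytes=500-899'
assert range_tuple_to_header((500, '')) == 'bytes=500-'
assert range_tuple_to_header(None) is None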
440 435 def range_tuple_normalize(range_tup):
441 436 """Normalize a (first_byte,last_byte) range tuple.
442 437 Return a tuple whose first element is guaranteed to be an int
443 438 and whose second element will be '' (meaning: the last byte) or
444 439 an int. Finally, return None if the normalized tuple == (0,'')
445 440 as that is equivalent to retrieving the entire file.
446 441 """
447 442 if range_tup is None:
448 443 return None
449 444 # handle first byte
450 445 fb = range_tup[0]
451 446 if fb in (None, ''):
452 447 fb = 0
453 448 else:
454 449 fb = int(fb)
455 450 # handle last byte
456 451 try:
457 452 lb = range_tup[1]
458 453 except IndexError:
459 454 lb = ''
460 455 else:
461 456 if lb is None:
462 457 lb = ''
463 458 elif lb != '':
464 459 lb = int(lb)
465 460 # check if range is over the entire file
466 461 if (fb, lb) == (0, ''):
467 462 return None
468 463 # check that the range is valid
469 464 if lb < fb:
470 465 raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
471 466 return (fb, lb)
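A few example normalizations; an invalid range such as (10, 5) raises RangeError:

assert range_tuple_normalize(('500', None)) == (500, '')
assert range_tuple_normalize((0, '')) is None      # whole file: no range needed
assert range_tuple_normalize(None) is None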
@@ -1,287 +1,286 b''
1 1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import os
10 10 from mercurial import ui, hg, hook, error, encoding, templater
11 11 from common import get_mtime, ErrorResponse, permhooks
12 12 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
13 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
14 13 from request import wsgirequest
15 14 import webcommands, protocol, webutil
16 15
17 16 perms = {
18 17 'changegroup': 'pull',
19 18 'changegroupsubset': 'pull',
20 19 'unbundle': 'push',
21 20 'stream_out': 'pull',
22 21 }
23 22
24 23 class hgweb(object):
25 24 def __init__(self, repo, name=None):
26 25 if isinstance(repo, str):
27 26 u = ui.ui()
28 27 u.setconfig('ui', 'report_untrusted', 'off')
29 28 u.setconfig('ui', 'interactive', 'off')
30 29 self.repo = hg.repository(u, repo)
31 30 else:
32 31 self.repo = repo
33 32
34 33 hook.redirect(True)
35 34 self.mtime = -1
36 35 self.reponame = name
37 36 self.archives = 'zip', 'gz', 'bz2'
38 37 self.stripecount = 1
39 38 # a repo owner may set web.templates in .hg/hgrc to get any file
40 39 # readable by the user running the CGI script
41 40 self.templatepath = self.config('web', 'templates')
42 41
43 42 # The CGI scripts are often run by a user different from the repo owner.
44 43 # Trust the settings from the .hg/hgrc files by default.
45 44 def config(self, section, name, default=None, untrusted=True):
46 45 return self.repo.ui.config(section, name, default,
47 46 untrusted=untrusted)
48 47
49 48 def configbool(self, section, name, default=False, untrusted=True):
50 49 return self.repo.ui.configbool(section, name, default,
51 50 untrusted=untrusted)
52 51
53 52 def configlist(self, section, name, default=None, untrusted=True):
54 53 return self.repo.ui.configlist(section, name, default,
55 54 untrusted=untrusted)
56 55
57 56 def refresh(self, request=None):
58 57 if request:
59 58 self.repo.ui.environ = request.env
60 59 mtime = get_mtime(self.repo.spath)
61 60 if mtime != self.mtime:
62 61 self.mtime = mtime
63 62 self.repo = hg.repository(self.repo.ui, self.repo.root)
64 63 self.maxchanges = int(self.config("web", "maxchanges", 10))
65 64 self.stripecount = int(self.config("web", "stripes", 1))
66 65 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
67 66 self.maxfiles = int(self.config("web", "maxfiles", 10))
68 67 self.allowpull = self.configbool("web", "allowpull", True)
69 68 encoding.encoding = self.config("web", "encoding",
70 69 encoding.encoding)
71 70
72 71 def run(self):
73 72 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
74 73 raise RuntimeError("This function is only intended to be "
75 74 "called while running as a CGI script.")
76 75 import mercurial.hgweb.wsgicgi as wsgicgi
77 76 wsgicgi.launch(self)
78 77
79 78 def __call__(self, env, respond):
80 79 req = wsgirequest(env, respond)
81 80 return self.run_wsgi(req)
82 81
83 82 def run_wsgi(self, req):
84 83
85 84 self.refresh(req)
86 85
87 86 # work with CGI variables to create coherent structure
88 87 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
89 88
90 89 req.url = req.env['SCRIPT_NAME']
91 90 if not req.url.endswith('/'):
92 91 req.url += '/'
93 92 if 'REPO_NAME' in req.env:
94 93 req.url += req.env['REPO_NAME'] + '/'
95 94
96 95 if 'PATH_INFO' in req.env:
97 96 parts = req.env['PATH_INFO'].strip('/').split('/')
98 97 repo_parts = req.env.get('REPO_NAME', '').split('/')
99 98 if parts[:len(repo_parts)] == repo_parts:
100 99 parts = parts[len(repo_parts):]
101 100 query = '/'.join(parts)
102 101 else:
103 102 query = req.env['QUERY_STRING'].split('&', 1)[0]
104 103 query = query.split(';', 1)[0]
105 104
106 105 # process this if it's a protocol request
107 106 # protocol bits don't need to create any URLs
108 107 # and the clients always use the old URL structure
109 108
110 109 cmd = req.form.get('cmd', [''])[0]
111 110 if cmd and cmd in protocol.__all__:
112 111 if query:
113 112 raise ErrorResponse(HTTP_NOT_FOUND)
114 113 try:
115 114 if cmd in perms:
116 115 try:
117 116 self.check_perm(req, perms[cmd])
118 117 except ErrorResponse, inst:
119 118 if cmd == 'unbundle':
120 119 req.drain()
121 120 raise
122 121 method = getattr(protocol, cmd)
123 122 return method(self.repo, req)
124 123 except ErrorResponse, inst:
125 124 req.respond(inst, protocol.HGTYPE)
126 125 if not inst.message:
127 126 return []
128 127 return '0\n%s\n' % inst.message,
129 128
130 129 # translate user-visible url structure to internal structure
131 130
132 131 args = query.split('/', 2)
133 132 if 'cmd' not in req.form and args and args[0]:
134 133
135 134 cmd = args.pop(0)
136 135 style = cmd.rfind('-')
137 136 if style != -1:
138 137 req.form['style'] = [cmd[:style]]
139 138 cmd = cmd[style + 1:]
140 139
141 140 # avoid accepting e.g. style parameter as command
142 141 if hasattr(webcommands, cmd):
143 142 req.form['cmd'] = [cmd]
144 143 else:
145 144 cmd = ''
146 145
147 146 if cmd == 'static':
148 147 req.form['file'] = ['/'.join(args)]
149 148 else:
150 149 if args and args[0]:
151 150 node = args.pop(0)
152 151 req.form['node'] = [node]
153 152 if args:
154 153 req.form['file'] = args
155 154
156 155 ua = req.env.get('HTTP_USER_AGENT', '')
157 156 if cmd == 'rev' and 'mercurial' in ua:
158 157 req.form['style'] = ['raw']
159 158
160 159 if cmd == 'archive':
161 160 fn = req.form['node'][0]
162 161 for type_, spec in self.archive_specs.iteritems():
163 162 ext = spec[2]
164 163 if fn.endswith(ext):
165 164 req.form['node'] = [fn[:-len(ext)]]
166 165 req.form['type'] = [type_]
167 166
168 167 # process the web interface request
169 168
170 169 try:
171 170 tmpl = self.templater(req)
172 171 ctype = tmpl('mimetype', encoding=encoding.encoding)
173 172 ctype = templater.stringify(ctype)
174 173
175 174 # check read permissions non-static content
176 175 if cmd != 'static':
177 176 self.check_perm(req, None)
178 177
179 178 if cmd == '':
180 179 req.form['cmd'] = [tmpl.cache['default']]
181 180 cmd = req.form['cmd'][0]
182 181
183 182 if cmd not in webcommands.__all__:
184 183 msg = 'no such method: %s' % cmd
185 184 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
186 185 elif cmd == 'file' and 'raw' in req.form.get('style', []):
187 186 self.ctype = ctype
188 187 content = webcommands.rawfile(self, req, tmpl)
189 188 else:
190 189 content = getattr(webcommands, cmd)(self, req, tmpl)
191 190 req.respond(HTTP_OK, ctype)
192 191
193 192 return content
194 193
195 194 except error.LookupError, err:
196 195 req.respond(HTTP_NOT_FOUND, ctype)
197 196 msg = str(err)
198 197 if 'manifest' not in msg:
199 198 msg = 'revision not found: %s' % err.name
200 199 return tmpl('error', error=msg)
201 200 except (error.RepoError, error.RevlogError), inst:
202 201 req.respond(HTTP_SERVER_ERROR, ctype)
203 202 return tmpl('error', error=str(inst))
204 203 except ErrorResponse, inst:
205 204 req.respond(inst, ctype)
206 205 return tmpl('error', error=inst.message)
207 206
208 207 def templater(self, req):
209 208
210 209 # determine scheme, port and server name
211 210 # this is needed to create absolute urls
212 211
213 212 proto = req.env.get('wsgi.url_scheme')
214 213 if proto == 'https':
215 214 proto = 'https'
216 215 default_port = "443"
217 216 else:
218 217 proto = 'http'
219 218 default_port = "80"
220 219
221 220 port = req.env["SERVER_PORT"]
222 221 port = port != default_port and (":" + port) or ""
223 222 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
224 223 staticurl = self.config("web", "staticurl") or req.url + 'static/'
225 224 if not staticurl.endswith('/'):
226 225 staticurl += '/'
227 226
228 227 # some functions for the templater
229 228
230 229 def header(**map):
231 230 yield tmpl('header', encoding=encoding.encoding, **map)
232 231
233 232 def footer(**map):
234 233 yield tmpl("footer", **map)
235 234
236 235 def motd(**map):
237 236 yield self.config("web", "motd", "")
238 237
239 238 # figure out which style to use
240 239
241 240 vars = {}
242 241 styles = (
243 242 req.form.get('style', [None])[0],
244 243 self.config('web', 'style'),
245 244 'paper',
246 245 )
247 246 style, mapfile = templater.stylemap(styles, self.templatepath)
248 247 if style == styles[0]:
249 248 vars['style'] = style
250 249
251 250 start = req.url[-1] == '?' and '&' or '?'
252 251 sessionvars = webutil.sessionvars(vars, start)
253 252
254 253 if not self.reponame:
255 254 self.reponame = (self.config("web", "name")
256 255 or req.env.get('REPO_NAME')
257 256 or req.url.strip('/') or self.repo.root)
258 257
259 258 # create the templater
260 259
261 260 tmpl = templater.templater(mapfile,
262 261 defaults={"url": req.url,
263 262 "staticurl": staticurl,
264 263 "urlbase": urlbase,
265 264 "repo": self.reponame,
266 265 "header": header,
267 266 "footer": footer,
268 267 "motd": motd,
269 268 "sessionvars": sessionvars
270 269 })
271 270 return tmpl
272 271
273 272 def archivelist(self, nodeid):
274 273 allowed = self.configlist("web", "allow_archive")
275 274 for i, spec in self.archive_specs.iteritems():
276 275 if i in allowed or self.configbool("web", "allow" + i):
277 276 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
278 277
279 278 archive_specs = {
280 279 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
281 280 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
282 281 'zip': ('application/zip', 'zip', '.zip', None),
283 282 }
284 283
285 284 def check_perm(self, req, op):
286 285 for hook in permhooks:
287 286 hook(self, req, op)
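For orientation, a hedged sketch of deploying the hgweb class above as a WSGI application with the standard-library server; the repository path and name are placeholders:

from wsgiref.simple_server import make_server
from mercurial.hgweb.hgweb_mod import hgweb

application = hgweb('/path/to/repo', name='myrepo')   # hgweb is a WSGI callable
make_server('', 8080, application).serve_forever()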
@@ -1,277 +1,277 b''
1 1 # hgweb/server.py - The standalone hg web server.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 from mercurial import hg, util, error
10 from mercurial import util, error
11 11 from mercurial.i18n import _
12 12
13 13 def _splitURI(uri):
14 14 """ Return path and query splited from uri
15 15
16 16 Just like CGI environment, the path is unquoted, the query is
17 17 not.
18 18 """
19 19 if '?' in uri:
20 20 path, query = uri.split('?', 1)
21 21 else:
22 22 path, query = uri, ''
23 23 return urllib.unquote(path), query
24 24
25 25 class _error_logger(object):
26 26 def __init__(self, handler):
27 27 self.handler = handler
28 28 def flush(self):
29 29 pass
30 30 def write(self, str):
31 31 self.writelines(str.split('\n'))
32 32 def writelines(self, seq):
33 33 for msg in seq:
34 34 self.handler.log_error("HG error: %s", msg)
35 35
36 36 class _hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
37 37
38 38 url_scheme = 'http'
39 39
40 40 def __init__(self, *args, **kargs):
41 41 self.protocol_version = 'HTTP/1.1'
42 42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
43 43
44 44 def _log_any(self, fp, format, *args):
45 45 fp.write("%s - - [%s] %s\n" % (self.client_address[0],
46 46 self.log_date_time_string(),
47 47 format % args))
48 48 fp.flush()
49 49
50 50 def log_error(self, format, *args):
51 51 self._log_any(self.server.errorlog, format, *args)
52 52
53 53 def log_message(self, format, *args):
54 54 self._log_any(self.server.accesslog, format, *args)
55 55
56 56 def do_write(self):
57 57 try:
58 58 self.do_hgweb()
59 59 except socket.error, inst:
60 60 if inst[0] != errno.EPIPE:
61 61 raise
62 62
63 63 def do_POST(self):
64 64 try:
65 65 self.do_write()
66 66 except StandardError:
67 67 self._start_response("500 Internal Server Error", [])
68 68 self._write("Internal Server Error")
69 69 tb = "".join(traceback.format_exception(*sys.exc_info()))
70 70 self.log_error("Exception happened during processing "
71 71 "request '%s':\n%s", self.path, tb)
72 72
73 73 def do_GET(self):
74 74 self.do_POST()
75 75
76 76 def do_hgweb(self):
77 77 path, query = _splitURI(self.path)
78 78
79 79 env = {}
80 80 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
81 81 env['REQUEST_METHOD'] = self.command
82 82 env['SERVER_NAME'] = self.server.server_name
83 83 env['SERVER_PORT'] = str(self.server.server_port)
84 84 env['REQUEST_URI'] = self.path
85 85 env['SCRIPT_NAME'] = self.server.prefix
86 86 env['PATH_INFO'] = path[len(self.server.prefix):]
87 87 env['REMOTE_HOST'] = self.client_address[0]
88 88 env['REMOTE_ADDR'] = self.client_address[0]
89 89 if query:
90 90 env['QUERY_STRING'] = query
91 91
92 92 if self.headers.typeheader is None:
93 93 env['CONTENT_TYPE'] = self.headers.type
94 94 else:
95 95 env['CONTENT_TYPE'] = self.headers.typeheader
96 96 length = self.headers.getheader('content-length')
97 97 if length:
98 98 env['CONTENT_LENGTH'] = length
99 99 for header in [h for h in self.headers.keys()
100 100 if h not in ('content-type', 'content-length')]:
101 101 hkey = 'HTTP_' + header.replace('-', '_').upper()
102 102 hval = self.headers.getheader(header)
103 103 hval = hval.replace('\n', '').strip()
104 104 if hval:
105 105 env[hkey] = hval
106 106 env['SERVER_PROTOCOL'] = self.request_version
107 107 env['wsgi.version'] = (1, 0)
108 108 env['wsgi.url_scheme'] = self.url_scheme
109 109 env['wsgi.input'] = self.rfile
110 110 env['wsgi.errors'] = _error_logger(self)
111 111 env['wsgi.multithread'] = isinstance(self.server,
112 112 SocketServer.ThreadingMixIn)
113 113 env['wsgi.multiprocess'] = isinstance(self.server,
114 114 SocketServer.ForkingMixIn)
115 115 env['wsgi.run_once'] = 0
116 116
117 117 self.close_connection = True
118 118 self.saved_status = None
119 119 self.saved_headers = []
120 120 self.sent_headers = False
121 121 self.length = None
122 122 for chunk in self.server.application(env, self._start_response):
123 123 self._write(chunk)
124 124
125 125 def send_headers(self):
126 126 if not self.saved_status:
127 127 raise AssertionError("Sending headers before "
128 128 "start_response() called")
129 129 saved_status = self.saved_status.split(None, 1)
130 130 saved_status[0] = int(saved_status[0])
131 131 self.send_response(*saved_status)
132 132 should_close = True
133 133 for h in self.saved_headers:
134 134 self.send_header(*h)
135 135 if h[0].lower() == 'content-length':
136 136 should_close = False
137 137 self.length = int(h[1])
138 138 # The value of the Connection header is a list of case-insensitive
139 139 # tokens separated by commas and optional whitespace.
140 140 if 'close' in [token.strip().lower() for token in
141 141 self.headers.get('connection', '').split(',')]:
142 142 should_close = True
143 143 if should_close:
144 144 self.send_header('Connection', 'close')
145 145 self.close_connection = should_close
146 146 self.end_headers()
147 147 self.sent_headers = True
148 148
149 149 def _start_response(self, http_status, headers, exc_info=None):
150 150 code, msg = http_status.split(None, 1)
151 151 code = int(code)
152 152 self.saved_status = http_status
153 153 bad_headers = ('connection', 'transfer-encoding')
154 154 self.saved_headers = [h for h in headers
155 155 if h[0].lower() not in bad_headers]
156 156 return self._write
157 157
158 158 def _write(self, data):
159 159 if not self.saved_status:
160 160 raise AssertionError("data written before start_response() called")
161 161 elif not self.sent_headers:
162 162 self.send_headers()
163 163 if self.length is not None:
164 164 if len(data) > self.length:
165 165 raise AssertionError("Content-length header sent, but more "
166 166 "bytes than specified are being written.")
167 167 self.length = self.length - len(data)
168 168 self.wfile.write(data)
169 169 self.wfile.flush()
170 170
171 171 class _shgwebhandler(_hgwebhandler):
172 172
173 173 url_scheme = 'https'
174 174
175 175 def setup(self):
176 176 self.connection = self.request
177 177 self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
178 178 self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
179 179
180 180 def do_write(self):
181 181 from OpenSSL.SSL import SysCallError
182 182 try:
183 183 super(_shgwebhandler, self).do_write()
184 184 except SysCallError, inst:
185 185 if inst.args[0] != errno.EPIPE:
186 186 raise
187 187
188 188 def handle_one_request(self):
189 189 from OpenSSL.SSL import SysCallError, ZeroReturnError
190 190 try:
191 191 super(_shgwebhandler, self).handle_one_request()
192 192 except (SysCallError, ZeroReturnError):
193 193 self.close_connection = True
194 194 pass
195 195
196 196 try:
197 197 from threading import activeCount
198 198 _mixin = SocketServer.ThreadingMixIn
199 199 except ImportError:
200 200 if hasattr(os, "fork"):
201 201 _mixin = SocketServer.ForkingMixIn
202 202 else:
203 203 class _mixin:
204 204 pass
205 205
206 206 def openlog(opt, default):
207 207 if opt and opt != '-':
208 208 return open(opt, 'a')
209 209 return default
210 210
211 211 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
212 212
213 213 # SO_REUSEADDR has broken semantics on windows
214 214 if os.name == 'nt':
215 215 allow_reuse_address = 0
216 216
217 217 def __init__(self, ui, app, addr, handler, **kwargs):
218 218 BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs)
219 219 self.daemon_threads = True
220 220 self.application = app
221 221
222 222 ssl_cert = ui.config('web', 'certificate')
223 223 if ssl_cert:
224 224 try:
225 225 from OpenSSL import SSL
226 226 ctx = SSL.Context(SSL.SSLv23_METHOD)
227 227 except ImportError:
228 228 raise util.Abort(_("SSL support is unavailable"))
229 229 ctx.use_privatekey_file(ssl_cert)
230 230 ctx.use_certificate_file(ssl_cert)
231 231 sock = socket.socket(self.address_family, self.socket_type)
232 232 self.socket = SSL.Connection(ctx, sock)
233 233 self.server_bind()
234 234 self.server_activate()
235 235
236 236 prefix = ui.config('web', 'prefix', '')
237 237 if prefix:
238 238 prefix = '/' + prefix.strip('/')
239 239 self.prefix = prefix
240 240
241 241 alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
242 242 elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
243 243 self.accesslog = alog
244 244 self.errorlog = elog
245 245
246 246 self.addr, self.port = self.socket.getsockname()[0:2]
247 247 self.fqaddr = socket.getfqdn(addr[0])
248 248
249 249 class IPv6HTTPServer(MercurialHTTPServer):
250 250 address_family = getattr(socket, 'AF_INET6', None)
251 251 def __init__(self, *args, **kwargs):
252 252 if self.address_family is None:
253 253 raise error.RepoError(_('IPv6 is not available on this system'))
254 254 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
255 255
256 256 def create_server(ui, app):
257 257
258 258 if ui.config('web', 'certificate'):
259 259 handler = _shgwebhandler
260 260 else:
261 261 handler = _hgwebhandler
262 262
263 263 if ui.configbool('web', 'ipv6'):
264 264 cls = IPv6HTTPServer
265 265 else:
266 266 cls = MercurialHTTPServer
267 267
268 268 # ugly hack due to python issue5853 (for threaded use)
269 269 import mimetypes; mimetypes.init()
270 270
271 271 address = ui.config('web', 'address', '')
272 272 port = int(ui.config('web', 'port', 8000))
273 273 try:
274 274 return cls(ui, app, (address, port), handler)
275 275 except socket.error, inst:
276 276 raise util.Abort(_("cannot start server at '%s:%d': %s")
277 277 % (address, port, inst.args[1]))
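A speculative end-to-end sketch of the standalone server; the ui configuration and repository path are assumptions, normally supplied by the 'hg serve' machinery:

from mercurial import ui as uimod
from mercurial.hgweb import server
from mercurial.hgweb.hgweb_mod import hgweb

u = uimod.ui()
u.setconfig('web', 'port', '8000')
httpd = server.create_server(u, hgweb('/path/to/repo'))
httpd.serve_forever()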
@@ -1,1697 +1,1697 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from i18n import _
10 10 from node import hex, nullid, short
11 11 import base85, cmdutil, mdiff, util, diffhelpers, copies
12 12 import cStringIO, email.Parser, os, re
13 import sys, tempfile, zlib
13 import tempfile, zlib
14 14
15 15 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 16
17 17 class PatchError(Exception):
18 18 pass
19 19
20 20 class NoHunks(PatchError):
21 21 pass
22 22
23 23 # helper functions
24 24
25 25 def copyfile(src, dst, basedir):
26 26 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
27 27 if os.path.exists(absdst):
28 28 raise util.Abort(_("cannot create %s: destination already exists") %
29 29 dst)
30 30
31 31 dstdir = os.path.dirname(absdst)
32 32 if dstdir and not os.path.isdir(dstdir):
33 33 try:
34 34 os.makedirs(dstdir)
35 35 except IOError:
36 36 raise util.Abort(
37 37 _("cannot create %s: unable to create destination directory")
38 38 % dst)
39 39
40 40 util.copyfile(abssrc, absdst)
41 41
42 42 # public functions
43 43
44 44 def split(stream):
45 45 '''return an iterator of individual patches from a stream'''
46 46 def isheader(line, inheader):
47 47 if inheader and line[0] in (' ', '\t'):
48 48 # continuation
49 49 return True
50 50 if line[0] in (' ', '-', '+'):
51 51 # diff line - don't check for header pattern in there
52 52 return False
53 53 l = line.split(': ', 1)
54 54 return len(l) == 2 and ' ' not in l[0]
55 55
56 56 def chunk(lines):
57 57 return cStringIO.StringIO(''.join(lines))
58 58
59 59 def hgsplit(stream, cur):
60 60 inheader = True
61 61
62 62 for line in stream:
63 63 if not line.strip():
64 64 inheader = False
65 65 if not inheader and line.startswith('# HG changeset patch'):
66 66 yield chunk(cur)
67 67 cur = []
68 68 inheader = True
69 69
70 70 cur.append(line)
71 71
72 72 if cur:
73 73 yield chunk(cur)
74 74
75 75 def mboxsplit(stream, cur):
76 76 for line in stream:
77 77 if line.startswith('From '):
78 78 for c in split(chunk(cur[1:])):
79 79 yield c
80 80 cur = []
81 81
82 82 cur.append(line)
83 83
84 84 if cur:
85 85 for c in split(chunk(cur[1:])):
86 86 yield c
87 87
88 88 def mimesplit(stream, cur):
89 89 def msgfp(m):
90 90 fp = cStringIO.StringIO()
91 91 g = email.Generator.Generator(fp, mangle_from_=False)
92 92 g.flatten(m)
93 93 fp.seek(0)
94 94 return fp
95 95
96 96 for line in stream:
97 97 cur.append(line)
98 98 c = chunk(cur)
99 99
100 100 m = email.Parser.Parser().parse(c)
101 101 if not m.is_multipart():
102 102 yield msgfp(m)
103 103 else:
104 104 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
105 105 for part in m.walk():
106 106 ct = part.get_content_type()
107 107 if ct not in ok_types:
108 108 continue
109 109 yield msgfp(part)
110 110
111 111 def headersplit(stream, cur):
112 112 inheader = False
113 113
114 114 for line in stream:
115 115 if not inheader and isheader(line, inheader):
116 116 yield chunk(cur)
117 117 cur = []
118 118 inheader = True
119 119 if inheader and not isheader(line, inheader):
120 120 inheader = False
121 121
122 122 cur.append(line)
123 123
124 124 if cur:
125 125 yield chunk(cur)
126 126
127 127 def remainder(cur):
128 128 yield chunk(cur)
129 129
130 130 class fiter(object):
131 131 def __init__(self, fp):
132 132 self.fp = fp
133 133
134 134 def __iter__(self):
135 135 return self
136 136
137 137 def next(self):
138 138 l = self.fp.readline()
139 139 if not l:
140 140 raise StopIteration
141 141 return l
142 142
143 143 inheader = False
144 144 cur = []
145 145
146 146 mimeheaders = ['content-type']
147 147
148 148 if not hasattr(stream, 'next'):
149 149 # http responses, for example, have readline but not next
150 150 stream = fiter(stream)
151 151
152 152 for line in stream:
153 153 cur.append(line)
154 154 if line.startswith('# HG changeset patch'):
155 155 return hgsplit(stream, cur)
156 156 elif line.startswith('From '):
157 157 return mboxsplit(stream, cur)
158 158 elif isheader(line, inheader):
159 159 inheader = True
160 160 if line.split(':', 1)[0].lower() in mimeheaders:
161 161 # let email parser handle this
162 162 return mimesplit(stream, cur)
163 163 elif line.startswith('--- ') and inheader:
164 164 # No evil headers seen by diff start, split by hand
165 165 return headersplit(stream, cur)
166 166 # Not enough info, keep reading
167 167
168 168 # if we are here, we have a very plain patch
169 169 return remainder(cur)
170 170
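A hedged example of iterating over the patches that split() yields from an mbox-style stream; the file name is a placeholder:

fp = open('incoming-patches.mbox')
for i, chunkfp in enumerate(split(fp)):
    print 'patch %d: %d bytes' % (i, len(chunkfp.read()))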
171 171 def extract(ui, fileobj):
172 172 '''extract patch from data read from fileobj.
173 173
174 174 patch can be a normal patch or contained in an email message.
175 175
176 176 return tuple (filename, message, user, date, branch, node, p1, p2).
177 177 Any item in the returned tuple can be None. If filename is None,
178 178 fileobj did not contain a patch. Caller must unlink filename when done.'''
179 179
180 180 # attempt to detect the start of a patch
181 181 # (this heuristic is borrowed from quilt)
182 182 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
183 183 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
184 184 r'---[ \t].*?^\+\+\+[ \t]|'
185 185 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
186 186
187 187 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
188 188 tmpfp = os.fdopen(fd, 'w')
189 189 try:
190 190 msg = email.Parser.Parser().parse(fileobj)
191 191
192 192 subject = msg['Subject']
193 193 user = msg['From']
194 194 if not subject and not user:
195 195 # Not an email, restore parsed headers if any
196 196 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
197 197
198 198 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
199 199 # should try to parse msg['Date']
200 200 date = None
201 201 nodeid = None
202 202 branch = None
203 203 parents = []
204 204
205 205 if subject:
206 206 if subject.startswith('[PATCH'):
207 207 pend = subject.find(']')
208 208 if pend >= 0:
209 209 subject = subject[pend + 1:].lstrip()
210 210 subject = subject.replace('\n\t', ' ')
211 211 ui.debug('Subject: %s\n' % subject)
212 212 if user:
213 213 ui.debug('From: %s\n' % user)
214 214 diffs_seen = 0
215 215 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
216 216 message = ''
217 217 for part in msg.walk():
218 218 content_type = part.get_content_type()
219 219 ui.debug('Content-Type: %s\n' % content_type)
220 220 if content_type not in ok_types:
221 221 continue
222 222 payload = part.get_payload(decode=True)
223 223 m = diffre.search(payload)
224 224 if m:
225 225 hgpatch = False
226 226 ignoretext = False
227 227
228 228 ui.debug('found patch at byte %d\n' % m.start(0))
229 229 diffs_seen += 1
230 230 cfp = cStringIO.StringIO()
231 231 for line in payload[:m.start(0)].splitlines():
232 232 if line.startswith('# HG changeset patch'):
233 233 ui.debug('patch generated by hg export\n')
234 234 hgpatch = True
235 235 # drop earlier commit message content
236 236 cfp.seek(0)
237 237 cfp.truncate()
238 238 subject = None
239 239 elif hgpatch:
240 240 if line.startswith('# User '):
241 241 user = line[7:]
242 242 ui.debug('From: %s\n' % user)
243 243 elif line.startswith("# Date "):
244 244 date = line[7:]
245 245 elif line.startswith("# Branch "):
246 246 branch = line[9:]
247 247 elif line.startswith("# Node ID "):
248 248 nodeid = line[10:]
249 249 elif line.startswith("# Parent "):
250 250 parents.append(line[10:])
251 251 elif line == '---' and gitsendmail:
252 252 ignoretext = True
253 253 if not line.startswith('# ') and not ignoretext:
254 254 cfp.write(line)
255 255 cfp.write('\n')
256 256 message = cfp.getvalue()
257 257 if tmpfp:
258 258 tmpfp.write(payload)
259 259 if not payload.endswith('\n'):
260 260 tmpfp.write('\n')
261 261 elif not diffs_seen and message and content_type == 'text/plain':
262 262 message += '\n' + payload
263 263 except:
264 264 tmpfp.close()
265 265 os.unlink(tmpname)
266 266 raise
267 267
268 268 if subject and not message.startswith(subject):
269 269 message = '%s\n%s' % (subject, message)
270 270 tmpfp.close()
271 271 if not diffs_seen:
272 272 os.unlink(tmpname)
273 273 return None, message, user, date, branch, None, None, None
274 274 p1 = parents and parents.pop(0) or None
275 275 p2 = parents and parents.pop(0) or None
276 276 return tmpname, message, user, date, branch, nodeid, p1, p2
277 277
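A sketch of calling extract() on a mail containing a patch; the ui object and file name are placeholders, and the eight-element return tuple matches the code above:

import os
from mercurial import ui as uimod

u = uimod.ui()
fp = open('0001-fix.patch')
tmpname, message, user, date, branch, node, p1, p2 = extract(u, fp)
if tmpname:
    # the caller is responsible for removing the temporary file
    os.unlink(tmpname)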
278 278 GP_PATCH = 1 << 0 # we have to run patch
279 279 GP_FILTER = 1 << 1 # there's some copy/rename operation
280 280 GP_BINARY = 1 << 2 # there's a binary patch
281 281
282 282 class patchmeta(object):
283 283 """Patched file metadata
284 284
285 285 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
286 286 or COPY. 'path' is patched file path. 'oldpath' is set to the
287 287 origin file when 'op' is either COPY or RENAME, None otherwise. If
288 288 file mode is changed, 'mode' is a tuple (islink, isexec) where
289 289 'islink' is True if the file is a symlink and 'isexec' is True if
290 290 the file is executable. Otherwise, 'mode' is None.
291 291 """
292 292 def __init__(self, path):
293 293 self.path = path
294 294 self.oldpath = None
295 295 self.mode = None
296 296 self.op = 'MODIFY'
297 297 self.lineno = 0
298 298 self.binary = False
299 299
300 300 def setmode(self, mode):
301 301 islink = mode & 020000
302 302 isexec = mode & 0100
303 303 self.mode = (islink, isexec)
304 304
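Illustrative values for this metadata object, roughly as readgitpatch() below would fill them in for a git-style rename with an exec bit set (all values are assumptions):

gp = patchmeta('new.py')
gp.op = 'RENAME'
gp.oldpath = 'old.py'
gp.setmode(0100755)              # regular, executable file
islink, isexec = gp.mode
assert not islink and isexec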
305 305 def readgitpatch(lr):
306 306 """extract git-style metadata about patches from <patchname>"""
307 307
308 308 # Filter patch for git information
309 309 gp = None
310 310 gitpatches = []
311 311 # Can have a git patch with only metadata, causing patch to complain
312 312 dopatch = 0
313 313
314 314 lineno = 0
315 315 for line in lr:
316 316 lineno += 1
317 317 line = line.rstrip(' \r\n')
318 318 if line.startswith('diff --git'):
319 319 m = gitre.match(line)
320 320 if m:
321 321 if gp:
322 322 gitpatches.append(gp)
323 323 dst = m.group(2)
324 324 gp = patchmeta(dst)
325 325 gp.lineno = lineno
326 326 elif gp:
327 327 if line.startswith('--- '):
328 328 if gp.op in ('COPY', 'RENAME'):
329 329 dopatch |= GP_FILTER
330 330 gitpatches.append(gp)
331 331 gp = None
332 332 dopatch |= GP_PATCH
333 333 continue
334 334 if line.startswith('rename from '):
335 335 gp.op = 'RENAME'
336 336 gp.oldpath = line[12:]
337 337 elif line.startswith('rename to '):
338 338 gp.path = line[10:]
339 339 elif line.startswith('copy from '):
340 340 gp.op = 'COPY'
341 341 gp.oldpath = line[10:]
342 342 elif line.startswith('copy to '):
343 343 gp.path = line[8:]
344 344 elif line.startswith('deleted file'):
345 345 gp.op = 'DELETE'
346 346 # is the deleted file a symlink?
347 347 gp.setmode(int(line[-6:], 8))
348 348 elif line.startswith('new file mode '):
349 349 gp.op = 'ADD'
350 350 gp.setmode(int(line[-6:], 8))
351 351 elif line.startswith('new mode '):
352 352 gp.setmode(int(line[-6:], 8))
353 353 elif line.startswith('GIT binary patch'):
354 354 dopatch |= GP_BINARY
355 355 gp.binary = True
356 356 if gp:
357 357 gitpatches.append(gp)
358 358
359 359 if not gitpatches:
360 360 dopatch = GP_PATCH
361 361
362 362 return (dopatch, gitpatches)
363 363
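A small, hedged demonstration of readgitpatch() on a hand-written git-style header; a plain StringIO works as the line source:

import cStringIO

diff = ('diff --git a/old.py b/new.py\n'
        'rename from old.py\n'
        'rename to new.py\n'
        '--- a/old.py\n'
        '+++ b/new.py\n')
dopatch, gps = readgitpatch(cStringIO.StringIO(diff))
assert gps[0].op == 'RENAME' and gps[0].oldpath == 'old.py'
assert dopatch & GP_FILTER and dopatch & GP_PATCH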
364 364 class linereader(object):
365 365 # simple class to allow pushing lines back into the input stream
366 366 def __init__(self, fp, textmode=False):
367 367 self.fp = fp
368 368 self.buf = []
369 369 self.textmode = textmode
370 370 self.eol = None
371 371
372 372 def push(self, line):
373 373 if line is not None:
374 374 self.buf.append(line)
375 375
376 376 def readline(self):
377 377 if self.buf:
378 378 l = self.buf[0]
379 379 del self.buf[0]
380 380 return l
381 381 l = self.fp.readline()
382 382 if not self.eol:
383 383 if l.endswith('\r\n'):
384 384 self.eol = '\r\n'
385 385 elif l.endswith('\n'):
386 386 self.eol = '\n'
387 387 if self.textmode and l.endswith('\r\n'):
388 388 l = l[:-2] + '\n'
389 389 return l
390 390
391 391 def __iter__(self):
392 392 while 1:
393 393 l = self.readline()
394 394 if not l:
395 395 break
396 396 yield l
397 397
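linereader's push() simply queues a line to be handed back by the next readline(); a minimal illustration (the StringIO input is an assumption):

import cStringIO

lr = linereader(cStringIO.StringIO('one\ntwo\n'))
first = lr.readline()
lr.push(first)                     # put the line back
assert lr.readline() == first      # it is returned again
assert lr.readline() == 'two\n'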
398 398 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
399 399 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
400 400 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
401 401 eolmodes = ['strict', 'crlf', 'lf', 'auto']
402 402
403 403 class patchfile(object):
404 404 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
405 405 self.fname = fname
406 406 self.eolmode = eolmode
407 407 self.eol = None
408 408 self.opener = opener
409 409 self.ui = ui
410 410 self.lines = []
411 411 self.exists = False
412 412 self.missing = missing
413 413 if not missing:
414 414 try:
415 415 self.lines = self.readlines(fname)
416 416 self.exists = True
417 417 except IOError:
418 418 pass
419 419 else:
420 420 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
421 421
422 422 self.hash = {}
423 423 self.dirty = 0
424 424 self.offset = 0
425 425 self.skew = 0
426 426 self.rej = []
427 427 self.fileprinted = False
428 428 self.printfile(False)
429 429 self.hunks = 0
430 430
431 431 def readlines(self, fname):
432 432 if os.path.islink(fname):
433 433 return [os.readlink(fname)]
434 434 fp = self.opener(fname, 'r')
435 435 try:
436 436 lr = linereader(fp, self.eolmode != 'strict')
437 437 lines = list(lr)
438 438 self.eol = lr.eol
439 439 return lines
440 440 finally:
441 441 fp.close()
442 442
443 443 def writelines(self, fname, lines):
444 444 # Ensure supplied data ends in fname, being a regular file or
445 445 # a symlink. updatedir() will -too magically- take care of
446 446 # setting it to the proper type afterwards.
447 447 islink = os.path.islink(fname)
448 448 if islink:
449 449 fp = cStringIO.StringIO()
450 450 else:
451 451 fp = self.opener(fname, 'w')
452 452 try:
453 453 if self.eolmode == 'auto':
454 454 eol = self.eol
455 455 elif self.eolmode == 'crlf':
456 456 eol = '\r\n'
457 457 else:
458 458 eol = '\n'
459 459
460 460 if self.eolmode != 'strict' and eol and eol != '\n':
461 461 for l in lines:
462 462 if l and l[-1] == '\n':
463 463 l = l[:-1] + eol
464 464 fp.write(l)
465 465 else:
466 466 fp.writelines(lines)
467 467 if islink:
468 468 self.opener.symlink(fp.getvalue(), fname)
469 469 finally:
470 470 fp.close()
471 471
472 472 def unlink(self, fname):
473 473 os.unlink(fname)
474 474
475 475 def printfile(self, warn):
476 476 if self.fileprinted:
477 477 return
478 478 if warn or self.ui.verbose:
479 479 self.fileprinted = True
480 480 s = _("patching file %s\n") % self.fname
481 481 if warn:
482 482 self.ui.warn(s)
483 483 else:
484 484 self.ui.note(s)
485 485
486 486
487 487 def findlines(self, l, linenum):
488 488 # looks through the hash and finds candidate lines. The
489 489 # result is a list of line numbers sorted based on distance
490 490 # from linenum
491 491
492 492 cand = self.hash.get(l, [])
493 493 if len(cand) > 1:
494 494 # resort our list of potentials forward then back.
495 495 cand.sort(key=lambda x: abs(x - linenum))
496 496 return cand
497 497
498 498 def hashlines(self):
499 499 self.hash = {}
500 500 for x, s in enumerate(self.lines):
501 501 self.hash.setdefault(s, []).append(x)
502 502
503 503 def write_rej(self):
504 504 # our rejects are a little different from patch(1). This always
505 505 # creates rejects in the same form as the original patch. A file
506 506 # header is inserted so that you can run the reject through patch again
507 507 # without having to type the filename.
508 508
509 509 if not self.rej:
510 510 return
511 511
512 512 fname = self.fname + ".rej"
513 513 self.ui.warn(
514 514 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
515 515 (len(self.rej), self.hunks, fname))
516 516
517 517 def rejlines():
518 518 base = os.path.basename(self.fname)
519 519 yield "--- %s\n+++ %s\n" % (base, base)
520 520 for x in self.rej:
521 521 for l in x.hunk:
522 522 yield l
523 523 if l[-1] != '\n':
524 524 yield "\n\ No newline at end of file\n"
525 525
526 526 self.writelines(fname, rejlines())
527 527
528 528 def write(self, dest=None):
529 529 if not self.dirty:
530 530 return
531 531 if not dest:
532 532 dest = self.fname
533 533 self.writelines(dest, self.lines)
534 534
535 535 def close(self):
536 536 self.write()
537 537 self.write_rej()
538 538
539 539 def apply(self, h):
540 540 if not h.complete():
541 541 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
542 542 (h.number, h.desc, len(h.a), h.lena, len(h.b),
543 543 h.lenb))
544 544
545 545 self.hunks += 1
546 546
547 547 if self.missing:
548 548 self.rej.append(h)
549 549 return -1
550 550
551 551 if self.exists and h.createfile():
552 552 self.ui.warn(_("file %s already exists\n") % self.fname)
553 553 self.rej.append(h)
554 554 return -1
555 555
556 556 if isinstance(h, binhunk):
557 557 if h.rmfile():
558 558 self.unlink(self.fname)
559 559 else:
560 560 self.lines[:] = h.new()
561 561 self.offset += len(h.new())
562 562 self.dirty = 1
563 563 return 0
564 564
565 565 horig = h
566 566 if (self.eolmode in ('crlf', 'lf')
567 567 or self.eolmode == 'auto' and self.eol):
568 568 # If new eols are going to be normalized, then normalize
569 569 # hunk data before patching. Otherwise, preserve input
570 570 # line-endings.
571 571 h = h.getnormalized()
572 572
573 573 # fast case first, no offsets, no fuzz
574 574 old = h.old()
575 575 # patch starts counting at 1 unless we are adding the file
576 576 if h.starta == 0:
577 577 start = 0
578 578 else:
579 579 start = h.starta + self.offset - 1
580 580 orig_start = start
581 581 # if there's skew we want to emit the "(offset %d lines)" even
582 582 # when the hunk cleanly applies at start + skew, so skip the
583 583 # fast case code
584 584 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
585 585 if h.rmfile():
586 586 self.unlink(self.fname)
587 587 else:
588 588 self.lines[start : start + h.lena] = h.new()
589 589 self.offset += h.lenb - h.lena
590 590 self.dirty = 1
591 591 return 0
592 592
593 593 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
594 594 self.hashlines()
595 595 if h.hunk[-1][0] != ' ':
596 596 # if the hunk tried to put something at the bottom of the file
597 597 # override the start line and use eof here
598 598 search_start = len(self.lines)
599 599 else:
600 600 search_start = orig_start + self.skew
601 601
602 602 for fuzzlen in xrange(3):
603 603 for toponly in [True, False]:
604 604 old = h.old(fuzzlen, toponly)
605 605
606 606 cand = self.findlines(old[0][1:], search_start)
607 607 for l in cand:
608 608 if diffhelpers.testhunk(old, self.lines, l) == 0:
609 609 newlines = h.new(fuzzlen, toponly)
610 610 self.lines[l : l + len(old)] = newlines
611 611 self.offset += len(newlines) - len(old)
612 612 self.skew = l - orig_start
613 613 self.dirty = 1
614 614 offset = l - orig_start - fuzzlen
615 615 if fuzzlen:
616 616 msg = _("Hunk #%d succeeded at %d "
617 617 "with fuzz %d "
618 618 "(offset %d lines).\n")
619 619 self.printfile(True)
620 620 self.ui.warn(msg %
621 621 (h.number, l + 1, fuzzlen, offset))
622 622 else:
623 623 msg = _("Hunk #%d succeeded at %d "
624 624 "(offset %d lines).\n")
625 625 self.ui.note(msg % (h.number, l + 1, offset))
626 626 return fuzzlen
627 627 self.printfile(True)
628 628 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
629 629 self.rej.append(horig)
630 630 return -1
631 631
632 632 class hunk(object):
633 633 def __init__(self, desc, num, lr, context, create=False, remove=False):
634 634 self.number = num
635 635 self.desc = desc
636 636 self.hunk = [desc]
637 637 self.a = []
638 638 self.b = []
639 639 self.starta = self.lena = None
640 640 self.startb = self.lenb = None
641 641 if lr is not None:
642 642 if context:
643 643 self.read_context_hunk(lr)
644 644 else:
645 645 self.read_unified_hunk(lr)
646 646 self.create = create
647 647 self.remove = remove and not create
648 648
649 649 def getnormalized(self):
650 650 """Return a copy with line endings normalized to LF."""
651 651
652 652 def normalize(lines):
653 653 nlines = []
654 654 for line in lines:
655 655 if line.endswith('\r\n'):
656 656 line = line[:-2] + '\n'
657 657 nlines.append(line)
658 658 return nlines
659 659
660 660 # Dummy object, it is rebuilt manually
661 661 nh = hunk(self.desc, self.number, None, None, False, False)
662 662 nh.number = self.number
663 663 nh.desc = self.desc
664 664 nh.hunk = self.hunk
665 665 nh.a = normalize(self.a)
666 666 nh.b = normalize(self.b)
667 667 nh.starta = self.starta
668 668 nh.startb = self.startb
669 669 nh.lena = self.lena
670 670 nh.lenb = self.lenb
671 671 nh.create = self.create
672 672 nh.remove = self.remove
673 673 return nh
674 674
675 675 def read_unified_hunk(self, lr):
676 676 m = unidesc.match(self.desc)
677 677 if not m:
678 678 raise PatchError(_("bad hunk #%d") % self.number)
679 679 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
680 680 if self.lena is None:
681 681 self.lena = 1
682 682 else:
683 683 self.lena = int(self.lena)
684 684 if self.lenb is None:
685 685 self.lenb = 1
686 686 else:
687 687 self.lenb = int(self.lenb)
688 688 self.starta = int(self.starta)
689 689 self.startb = int(self.startb)
690 690 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
691 691 # if we hit eof before finishing out the hunk, the last line will
692 692 # be zero length. Let's try to fix it up.
693 693 while len(self.hunk[-1]) == 0:
694 694 del self.hunk[-1]
695 695 del self.a[-1]
696 696 del self.b[-1]
697 697 self.lena -= 1
698 698 self.lenb -= 1
699 699
700 700 def read_context_hunk(self, lr):
701 701 self.desc = lr.readline()
702 702 m = contextdesc.match(self.desc)
703 703 if not m:
704 704 raise PatchError(_("bad hunk #%d") % self.number)
705 705 foo, self.starta, foo2, aend, foo3 = m.groups()
706 706 self.starta = int(self.starta)
707 707 if aend is None:
708 708 aend = self.starta
709 709 self.lena = int(aend) - self.starta
710 710 if self.starta:
711 711 self.lena += 1
712 712 for x in xrange(self.lena):
713 713 l = lr.readline()
714 714 if l.startswith('---'):
715 715 lr.push(l)
716 716 break
717 717 s = l[2:]
718 718 if l.startswith('- ') or l.startswith('! '):
719 719 u = '-' + s
720 720 elif l.startswith(' '):
721 721 u = ' ' + s
722 722 else:
723 723 raise PatchError(_("bad hunk #%d old text line %d") %
724 724 (self.number, x))
725 725 self.a.append(u)
726 726 self.hunk.append(u)
727 727
728 728 l = lr.readline()
729 729 if l.startswith('\ '):
730 730 s = self.a[-1][:-1]
731 731 self.a[-1] = s
732 732 self.hunk[-1] = s
733 733 l = lr.readline()
734 734 m = contextdesc.match(l)
735 735 if not m:
736 736 raise PatchError(_("bad hunk #%d") % self.number)
737 737 foo, self.startb, foo2, bend, foo3 = m.groups()
738 738 self.startb = int(self.startb)
739 739 if bend is None:
740 740 bend = self.startb
741 741 self.lenb = int(bend) - self.startb
742 742 if self.startb:
743 743 self.lenb += 1
744 744 hunki = 1
745 745 for x in xrange(self.lenb):
746 746 l = lr.readline()
747 747 if l.startswith('\ '):
748 748 s = self.b[-1][:-1]
749 749 self.b[-1] = s
750 750 self.hunk[hunki - 1] = s
751 751 continue
752 752 if not l:
753 753 lr.push(l)
754 754 break
755 755 s = l[2:]
756 756 if l.startswith('+ ') or l.startswith('! '):
757 757 u = '+' + s
758 758 elif l.startswith(' '):
759 759 u = ' ' + s
760 760 elif len(self.b) == 0:
761 761 # this can happen when the hunk does not add any lines
762 762 lr.push(l)
763 763 break
764 764 else:
765 765 raise PatchError(_("bad hunk #%d old text line %d") %
766 766 (self.number, x))
767 767 self.b.append(s)
768 768 while True:
769 769 if hunki >= len(self.hunk):
770 770 h = ""
771 771 else:
772 772 h = self.hunk[hunki]
773 773 hunki += 1
774 774 if h == u:
775 775 break
776 776 elif h.startswith('-'):
777 777 continue
778 778 else:
779 779 self.hunk.insert(hunki - 1, u)
780 780 break
781 781
782 782 if not self.a:
783 783 # this happens when lines were only added to the hunk
784 784 for x in self.hunk:
785 785 if x.startswith('-') or x.startswith(' '):
786 786 self.a.append(x)
787 787 if not self.b:
788 788 # this happens when lines were only deleted from the hunk
789 789 for x in self.hunk:
790 790 if x.startswith('+') or x.startswith(' '):
791 791 self.b.append(x[1:])
792 792 # @@ -start,len +start,len @@
793 793 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
794 794 self.startb, self.lenb)
795 795 self.hunk[0] = self.desc
796 796
797 797 def fix_newline(self):
798 798 diffhelpers.fix_newline(self.hunk, self.a, self.b)
799 799
800 800 def complete(self):
801 801 return len(self.a) == self.lena and len(self.b) == self.lenb
802 802
803 803 def createfile(self):
804 804 return self.starta == 0 and self.lena == 0 and self.create
805 805
806 806 def rmfile(self):
807 807 return self.startb == 0 and self.lenb == 0 and self.remove
808 808
809 809 def fuzzit(self, l, fuzz, toponly):
810 810 # this removes context lines from the top and bottom of list 'l'. It
811 811 # checks the hunk to make sure only context lines are removed, and then
812 812 # returns a new shortened list of lines.
813 813 fuzz = min(fuzz, len(l)-1)
814 814 if fuzz:
815 815 top = 0
816 816 bot = 0
817 817 hlen = len(self.hunk)
818 818 for x in xrange(hlen - 1):
819 819 # the hunk starts with the @@ line, so use x+1
820 820 if self.hunk[x + 1][0] == ' ':
821 821 top += 1
822 822 else:
823 823 break
824 824 if not toponly:
825 825 for x in xrange(hlen - 1):
826 826 if self.hunk[hlen - bot - 1][0] == ' ':
827 827 bot += 1
828 828 else:
829 829 break
830 830
831 831 # top and bot now count context in the hunk
832 832 # adjust them if either one is short
833 833 context = max(top, bot, 3)
834 834 if bot < context:
835 835 bot = max(0, fuzz - (context - bot))
836 836 else:
837 837 bot = min(fuzz, bot)
838 838 if top < context:
839 839 top = max(0, fuzz - (context - top))
840 840 else:
841 841 top = min(fuzz, top)
842 842
843 843 return l[top:len(l)-bot]
844 844 return l
845 845
846 846 def old(self, fuzz=0, toponly=False):
847 847 return self.fuzzit(self.a, fuzz, toponly)
848 848
849 849 def new(self, fuzz=0, toponly=False):
850 850 return self.fuzzit(self.b, fuzz, toponly)
851 851
852 852 class binhunk:
853 853 'A binary patch file. Only understands literals so far.'
854 854 def __init__(self, gitpatch):
855 855 self.gitpatch = gitpatch
856 856 self.text = None
857 857 self.hunk = ['GIT binary patch\n']
858 858
859 859 def createfile(self):
860 860 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
861 861
862 862 def rmfile(self):
863 863 return self.gitpatch.op == 'DELETE'
864 864
865 865 def complete(self):
866 866 return self.text is not None
867 867
868 868 def new(self):
869 869 return [self.text]
870 870
871 871 def extract(self, lr):
872 872 line = lr.readline()
873 873 self.hunk.append(line)
874 874 while line and not line.startswith('literal '):
875 875 line = lr.readline()
876 876 self.hunk.append(line)
877 877 if not line:
878 878 raise PatchError(_('could not extract binary patch'))
879 879 size = int(line[8:].rstrip())
880 880 dec = []
881 881 line = lr.readline()
882 882 self.hunk.append(line)
883 883 while len(line) > 1:
884 884 l = line[0]
885 885 if l <= 'Z' and l >= 'A':
886 886 l = ord(l) - ord('A') + 1
887 887 else:
888 888 l = ord(l) - ord('a') + 27
889 889 dec.append(base85.b85decode(line[1:-1])[:l])
890 890 line = lr.readline()
891 891 self.hunk.append(line)
892 892 text = zlib.decompress(''.join(dec))
893 893 if len(text) != size:
894 894 raise PatchError(_('binary patch is %d bytes, not %d') %
895 895 (len(text), size))
896 896 self.text = text
897 897
898 898 def parsefilename(str):
899 899 # --- filename \t|space stuff
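# e.g. (illustrative, not in the original source):
#   parsefilename('--- a/foo.c\tThu Jan 01 00:00:00 1970') -> 'a/foo.c'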
900 900 s = str[4:].rstrip('\r\n')
901 901 i = s.find('\t')
902 902 if i < 0:
903 903 i = s.find(' ')
904 904 if i < 0:
905 905 return s
906 906 return s[:i]
907 907
908 908 def selectfile(afile_orig, bfile_orig, hunk, strip):
909 909 def pathstrip(path, count=1):
910 910 pathlen = len(path)
911 911 i = 0
912 912 if count == 0:
913 913 return '', path.rstrip()
914 914 while count > 0:
915 915 i = path.find('/', i)
916 916 if i == -1:
917 917 raise PatchError(_("unable to strip away %d dirs from %s") %
918 918 (count, path))
919 919 i += 1
920 920 # consume '//' in the path
921 921 while i < pathlen - 1 and path[i] == '/':
922 922 i += 1
923 923 count -= 1
924 924 return path[:i].lstrip(), path[i:].rstrip()
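# Illustrative examples (not in the original source):
#   pathstrip('a/b/c.txt', 1) -> ('a/', 'b/c.txt')
#   pathstrip('a/b/c.txt', 0) -> ('', 'a/b/c.txt')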
925 925
926 926 nulla = afile_orig == "/dev/null"
927 927 nullb = bfile_orig == "/dev/null"
928 928 abase, afile = pathstrip(afile_orig, strip)
929 929 gooda = not nulla and util.lexists(afile)
930 930 bbase, bfile = pathstrip(bfile_orig, strip)
931 931 if afile == bfile:
932 932 goodb = gooda
933 933 else:
934 934 goodb = not nullb and os.path.exists(bfile)
935 935 createfunc = hunk.createfile
936 936 missing = not goodb and not gooda and not createfunc()
937 937
938 938 # some diff programs apparently produce create patches where the
939 939 # afile is not /dev/null, but afile starts with bfile
940 940 abasedir = afile[:afile.rfind('/') + 1]
941 941 bbasedir = bfile[:bfile.rfind('/') + 1]
942 942 if missing and abasedir == bbasedir and afile.startswith(bfile):
943 943 # this isn't very pretty
944 944 hunk.create = True
945 945 if createfunc():
946 946 missing = False
947 947 else:
948 948 hunk.create = False
949 949
950 950 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
951 951 # diff is between a file and its backup. In this case, the original
952 952 # file should be patched (see original mpatch code).
953 953 isbackup = (abase == bbase and bfile.startswith(afile))
954 954 fname = None
955 955 if not missing:
956 956 if gooda and goodb:
957 957 fname = isbackup and afile or bfile
958 958 elif gooda:
959 959 fname = afile
960 960
961 961 if not fname:
962 962 if not nullb:
963 963 fname = isbackup and afile or bfile
964 964 elif not nulla:
965 965 fname = afile
966 966 else:
967 967 raise PatchError(_("undefined source and destination files"))
968 968
969 969 return fname, missing
970 970
971 971 def scangitpatch(lr, firstline):
972 972 """
973 973 Git patches can emit:
974 974 - rename a to b
975 975 - change b
976 976 - copy a to c
977 977 - change c
978 978
979 979 We cannot apply this sequence as-is: the renamed 'a' could not be
980 980 found because it would have been renamed already. And we cannot copy
981 981 from 'b' instead because 'b' would have been changed already. So
982 982 we scan the git patch for copy and rename commands so we can
983 983 perform the copies ahead of time.
984 984 """
985 985 pos = 0
986 986 try:
987 987 pos = lr.fp.tell()
988 988 fp = lr.fp
989 989 except IOError:
990 990 fp = cStringIO.StringIO(lr.fp.read())
991 991 gitlr = linereader(fp, lr.textmode)
992 992 gitlr.push(firstline)
993 993 (dopatch, gitpatches) = readgitpatch(gitlr)
994 994 fp.seek(pos)
995 995 return dopatch, gitpatches
996 996
997 997 def iterhunks(ui, fp, sourcefile=None):
998 998 """Read a patch and yield the following events:
999 999 - ("file", afile, bfile, firsthunk): select a new target file.
1000 1000 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1001 1001 "file" event.
1002 1002 - ("git", gitchanges): current diff is in git format, gitchanges
1003 1003 maps filenames to gitpatch records. Unique event.
1004 1004 """
1005 1005 changed = {}
1006 1006 current_hunk = None
1007 1007 afile = ""
1008 1008 bfile = ""
1009 1009 state = None
1010 1010 hunknum = 0
1011 1011 emitfile = False
1012 1012 git = False
1013 1013
1014 1014 # our states
1015 1015 BFILE = 1
1016 1016 context = None
1017 1017 lr = linereader(fp)
1018 1018 # gitworkdone is True if a git operation (copy, rename, ...) was
1019 1019 # performed already for the current file. Useful when the file
1020 1020 # section may have no hunk.
1021 1021 gitworkdone = False
1022 1022 empty = None
1023 1023
1024 1024 while True:
1025 1025 newfile = newgitfile = False
1026 1026 x = lr.readline()
1027 1027 if not x:
1028 1028 break
1029 1029 if current_hunk:
1030 1030 if x.startswith('\ '):
1031 1031 current_hunk.fix_newline()
1032 1032 yield 'hunk', current_hunk
1033 1033 current_hunk = None
1034 1034 empty = False
1035 1035 if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
1036 1036 ((context is not False) and x.startswith('***************')))):
1037 1037 try:
1038 1038 if context is None and x.startswith('***************'):
1039 1039 context = True
1040 1040 gpatch = changed.get(bfile)
1041 1041 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1042 1042 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1043 1043 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
1044 1044 except PatchError, err:
1045 1045 ui.debug(err)
1046 1046 current_hunk = None
1047 1047 continue
1048 1048 hunknum += 1
1049 1049 if emitfile:
1050 1050 emitfile = False
1051 1051 yield 'file', (afile, bfile, current_hunk)
1052 1052 empty = False
1053 1053 elif state == BFILE and x.startswith('GIT binary patch'):
1054 1054 current_hunk = binhunk(changed[bfile])
1055 1055 hunknum += 1
1056 1056 if emitfile:
1057 1057 emitfile = False
1058 1058 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
1059 1059 empty = False
1060 1060 current_hunk.extract(lr)
1061 1061 elif x.startswith('diff --git'):
1062 1062 # check for git diff, scanning the whole patch file if needed
1063 1063 m = gitre.match(x)
1064 1064 gitworkdone = False
1065 1065 if m:
1066 1066 afile, bfile = m.group(1, 2)
1067 1067 if not git:
1068 1068 git = True
1069 1069 gitpatches = scangitpatch(lr, x)[1]
1070 1070 yield 'git', gitpatches
1071 1071 for gp in gitpatches:
1072 1072 changed[gp.path] = gp
1073 1073 # else error?
1074 1074 # copy/rename + modify should modify target, not source
1075 1075 gp = changed.get(bfile)
1076 1076 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1077 1077 or gp.mode):
1078 1078 afile = bfile
1079 1079 gitworkdone = True
1080 1080 newgitfile = True
1081 1081 elif x.startswith('---'):
1082 1082 # check for a unified diff
1083 1083 l2 = lr.readline()
1084 1084 if not l2.startswith('+++'):
1085 1085 lr.push(l2)
1086 1086 continue
1087 1087 newfile = True
1088 1088 context = False
1089 1089 afile = parsefilename(x)
1090 1090 bfile = parsefilename(l2)
1091 1091 elif x.startswith('***'):
1092 1092 # check for a context diff
1093 1093 l2 = lr.readline()
1094 1094 if not l2.startswith('---'):
1095 1095 lr.push(l2)
1096 1096 continue
1097 1097 l3 = lr.readline()
1098 1098 lr.push(l3)
1099 1099 if not l3.startswith("***************"):
1100 1100 lr.push(l2)
1101 1101 continue
1102 1102 newfile = True
1103 1103 context = True
1104 1104 afile = parsefilename(x)
1105 1105 bfile = parsefilename(l2)
1106 1106
1107 1107 if newfile:
1108 1108 if empty:
1109 1109 raise NoHunks
1110 1110 empty = not gitworkdone
1111 1111 gitworkdone = False
1112 1112
1113 1113 if newgitfile or newfile:
1114 1114 emitfile = True
1115 1115 state = BFILE
1116 1116 hunknum = 0
1117 1117 if current_hunk:
1118 1118 if current_hunk.complete():
1119 1119 yield 'hunk', current_hunk
1120 1120 empty = False
1121 1121 else:
1122 1122 raise PatchError(_("malformed patch %s %s") % (afile,
1123 1123 current_hunk.desc))
1124 1124
1125 1125 if (empty is None and not gitworkdone) or empty:
1126 1126 raise NoHunks
1127 1127
1128 1128 def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'):
1129 1129 """
1130 1130 Reads a patch from fp and tries to apply it.
1131 1131
1132 1132 The dict 'changed' is filled in with all of the filenames changed
1133 1133 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1134 1134 found and 1 if there was any fuzz.
1135 1135
1136 1136 If 'eolmode' is 'strict', the patch content and patched file are
1137 1137 read in binary mode. Otherwise, line endings are ignored when
1138 1138 patching and then normalized according to 'eolmode'.
1139 1139 """
1140 1140 rejects = 0
1141 1141 err = 0
1142 1142 current_file = None
1143 1143 gitpatches = None
1144 1144 opener = util.opener(os.getcwd())
1145 1145
1146 1146 def closefile():
1147 1147 if not current_file:
1148 1148 return 0
1149 1149 current_file.close()
1150 1150 return len(current_file.rej)
1151 1151
1152 1152 for state, values in iterhunks(ui, fp, sourcefile):
1153 1153 if state == 'hunk':
1154 1154 if not current_file:
1155 1155 continue
1156 1156 current_hunk = values
1157 1157 ret = current_file.apply(current_hunk)
1158 1158 if ret >= 0:
1159 1159 changed.setdefault(current_file.fname, None)
1160 1160 if ret > 0:
1161 1161 err = 1
1162 1162 elif state == 'file':
1163 1163 rejects += closefile()
1164 1164 afile, bfile, first_hunk = values
1165 1165 try:
1166 1166 if sourcefile:
1167 1167 current_file = patchfile(ui, sourcefile, opener,
1168 1168 eolmode=eolmode)
1169 1169 else:
1170 1170 current_file, missing = selectfile(afile, bfile,
1171 1171 first_hunk, strip)
1172 1172 current_file = patchfile(ui, current_file, opener,
1173 1173 missing, eolmode)
1174 1174 except PatchError, err:
1175 1175 ui.warn(str(err) + '\n')
1176 1176 current_file, current_hunk = None, None
1177 1177 rejects += 1
1178 1178 continue
1179 1179 elif state == 'git':
1180 1180 gitpatches = values
1181 1181 cwd = os.getcwd()
1182 1182 for gp in gitpatches:
1183 1183 if gp.op in ('COPY', 'RENAME'):
1184 1184 copyfile(gp.oldpath, gp.path, cwd)
1185 1185 changed[gp.path] = gp
1186 1186 else:
1187 1187 raise util.Abort(_('unsupported parser state: %s') % state)
1188 1188
1189 1189 rejects += closefile()
1190 1190
1191 1191 if rejects:
1192 1192 return -1
1193 1193 return err
1194 1194
1195 1195 def updatedir(ui, repo, patches, similarity=0):
1196 1196 '''Update dirstate after patch application according to metadata'''
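# For example (comment added for clarity): a git 'RENAME' record for old -> new
# leads to repo.copy(old, new) plus removal of 'old' below, while a plain
# 'COPY' only records the copy relationship.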
1197 1197 if not patches:
1198 1198 return
1199 1199 copies = []
1200 1200 removes = set()
1201 1201 cfiles = patches.keys()
1202 1202 cwd = repo.getcwd()
1203 1203 if cwd:
1204 1204 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1205 1205 for f in patches:
1206 1206 gp = patches[f]
1207 1207 if not gp:
1208 1208 continue
1209 1209 if gp.op == 'RENAME':
1210 1210 copies.append((gp.oldpath, gp.path))
1211 1211 removes.add(gp.oldpath)
1212 1212 elif gp.op == 'COPY':
1213 1213 copies.append((gp.oldpath, gp.path))
1214 1214 elif gp.op == 'DELETE':
1215 1215 removes.add(gp.path)
1216 1216 for src, dst in copies:
1217 1217 repo.copy(src, dst)
1218 1218 if (not similarity) and removes:
1219 1219 repo.remove(sorted(removes), True)
1220 1220 for f in patches:
1221 1221 gp = patches[f]
1222 1222 if gp and gp.mode:
1223 1223 islink, isexec = gp.mode
1224 1224 dst = repo.wjoin(gp.path)
1225 1225 # patch won't create empty files
1226 1226 if gp.op == 'ADD' and not os.path.exists(dst):
1227 1227 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1228 1228 repo.wwrite(gp.path, '', flags)
1229 1229 elif gp.op != 'DELETE':
1230 1230 util.set_flags(dst, islink, isexec)
1231 1231 cmdutil.addremove(repo, cfiles, similarity=similarity)
1232 1232 files = patches.keys()
1233 1233 files.extend([r for r in removes if r not in files])
1234 1234 return sorted(files)
1235 1235
1236 1236 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1237 1237 """use <patcher> to apply <patchname> to the working directory.
1238 1238 returns whether patch was applied with fuzz factor."""
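# Roughly (illustrative, not from the original source), with patcher='patch'
# and strip=1 this runs something like:  patch -d '<cwd>' -p1 < 'fix.diff'
# and then scans the output for "patching file", fuzz and reject messages.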
1239 1239
1240 1240 fuzz = False
1241 1241 if cwd:
1242 1242 args.append('-d %s' % util.shellquote(cwd))
1243 1243 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1244 1244 util.shellquote(patchname)))
1245 1245
1246 1246 for line in fp:
1247 1247 line = line.rstrip()
1248 1248 ui.note(line + '\n')
1249 1249 if line.startswith('patching file '):
1250 1250 pf = util.parse_patch_output(line)
1251 1251 printed_file = False
1252 1252 files.setdefault(pf, None)
1253 1253 elif line.find('with fuzz') >= 0:
1254 1254 fuzz = True
1255 1255 if not printed_file:
1256 1256 ui.warn(pf + '\n')
1257 1257 printed_file = True
1258 1258 ui.warn(line + '\n')
1259 1259 elif line.find('saving rejects to file') >= 0:
1260 1260 ui.warn(line + '\n')
1261 1261 elif line.find('FAILED') >= 0:
1262 1262 if not printed_file:
1263 1263 ui.warn(pf + '\n')
1264 1264 printed_file = True
1265 1265 ui.warn(line + '\n')
1266 1266 code = fp.close()
1267 1267 if code:
1268 1268 raise PatchError(_("patch command failed: %s") %
1269 1269 util.explain_exit(code)[0])
1270 1270 return fuzz
1271 1271
1272 1272 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1273 1273 """use builtin patch to apply <patchobj> to the working directory.
1274 1274 returns whether patch was applied with fuzz factor."""
1275 1275
1276 1276 if files is None:
1277 1277 files = {}
1278 1278 if eolmode is None:
1279 1279 eolmode = ui.config('patch', 'eol', 'strict')
1280 1280 if eolmode.lower() not in eolmodes:
1281 1281 raise util.Abort(_('Unsupported line endings type: %s') % eolmode)
1282 1282 eolmode = eolmode.lower()
1283 1283
1284 1284 try:
1285 1285 fp = open(patchobj, 'rb')
1286 1286 except TypeError:
1287 1287 fp = patchobj
1288 1288 if cwd:
1289 1289 curdir = os.getcwd()
1290 1290 os.chdir(cwd)
1291 1291 try:
1292 1292 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1293 1293 finally:
1294 1294 if cwd:
1295 1295 os.chdir(curdir)
1296 1296 if fp != patchobj:
1297 1297 fp.close()
1298 1298 if ret < 0:
1299 1299 raise PatchError
1300 1300 return ret > 0
1301 1301
1302 1302 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1303 1303 """Apply <patchname> to the working directory.
1304 1304
1305 1305 'eolmode' specifies how line endings should be handled. It can be:
1306 1306 - 'strict': inputs are read in binary mode, EOLs are preserved
1307 1307 - 'crlf': EOLs are ignored when patching and reset to CRLF
1308 1308 - 'lf': EOLs are ignored when patching and reset to LF
1309 1309 - None: get it from user settings, default to 'strict'
1310 1310 'eolmode' is ignored when using an external patcher program.
1311 1311
1312 1312 Returns whether patch was applied with fuzz factor.
1313 1313 """
1314 1314 patcher = ui.config('ui', 'patch')
1315 1315 args = []
1316 1316 if files is None:
1317 1317 files = {}
1318 1318 try:
1319 1319 if patcher:
1320 1320 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1321 1321 files)
1322 1322 else:
1323 1323 try:
1324 1324 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1325 1325 except NoHunks:
1326 1326 ui.warn(_('internal patcher failed\n'
1327 1327 'please report details to '
1328 1328 'http://mercurial.selenic.com/bts/\n'
1329 1329 'or mercurial@selenic.com\n'))
1330 1330 patcher = (util.find_exe('gpatch') or util.find_exe('patch')
1331 1331 or 'patch')
1332 1332 ui.debug('no valid hunks found; trying with %r instead\n' %
1333 1333 patcher)
1334 1334 if util.needbinarypatch():
1335 1335 args.append('--binary')
1336 1336 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1337 1337 files)
1338 1338 except PatchError, err:
1339 1339 s = str(err)
1340 1340 if s:
1341 1341 raise util.Abort(s)
1342 1342 else:
1343 1343 raise util.Abort(_('patch failed to apply'))
1344 1344
1345 1345 def b85diff(to, tn):
1346 1346 '''print base85-encoded binary diff'''
1347 1347 def gitindex(text):
1348 1348 if not text:
1349 1349 return '0' * 40
1350 1350 l = len(text)
1351 1351 s = util.sha1('blob %d\0' % l)
1352 1352 s.update(text)
1353 1353 return s.hexdigest()
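# Note (comment added for clarity): this is the same construction git uses for
# blob object ids, sha1('blob <size>\0' + data), so the 'index' line matches
# what git itself would print for identical contents.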
1354 1354
1355 1355 def fmtline(line):
1356 1356 l = len(line)
1357 1357 if l <= 26:
1358 1358 l = chr(ord('A') + l - 1)
1359 1359 else:
1360 1360 l = chr(l - 26 + ord('a') - 1)
1361 1361 return '%c%s\n' % (l, base85.b85encode(line, True))
1362 1362
1363 1363 def chunk(text, csize=52):
1364 1364 l = len(text)
1365 1365 i = 0
1366 1366 while i < l:
1367 1367 yield text[i:i + csize]
1368 1368 i += csize
1369 1369
1370 1370 tohash = gitindex(to)
1371 1371 tnhash = gitindex(tn)
1372 1372 if tohash == tnhash:
1373 1373 return ""
1374 1374
1375 1375 # TODO: deltas
1376 1376 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1377 1377 (tohash, tnhash, len(tn))]
1378 1378 for l in chunk(zlib.compress(tn)):
1379 1379 ret.append(fmtline(l))
1380 1380 ret.append('\n')
1381 1381 return ''.join(ret)
1382 1382
1383 1383 class GitDiffRequired(Exception):
1384 1384 pass
1385 1385
1386 1386 def diffopts(ui, opts=None, untrusted=False):
1387 1387 def get(key, name=None, getter=ui.configbool):
1388 1388 return ((opts and opts.get(key)) or
1389 1389 getter('diff', name or key, None, untrusted=untrusted))
1390 1390 return mdiff.diffopts(
1391 1391 text=opts and opts.get('text'),
1392 1392 git=get('git'),
1393 1393 nodates=get('nodates'),
1394 1394 showfunc=get('show_function', 'showfunc'),
1395 1395 ignorews=get('ignore_all_space', 'ignorews'),
1396 1396 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1397 1397 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1398 1398 context=get('unified', getter=ui.config))
1399 1399
1400 1400 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1401 1401 losedatafn=None):
1402 1402 '''yields diff of changes to files between two nodes, or node and
1403 1403 working directory.
1404 1404
1405 1405 if node1 is None, use first dirstate parent instead.
1406 1406 if node2 is None, compare node1 with working directory.
1407 1407
1408 1408 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1409 1409 every time some change cannot be represented with the current
1410 1410 patch format. Return False to upgrade to git patch format, True to
1411 1411 accept the loss or raise an exception to abort the diff. It is
1412 1412 called with the name of the current file being diffed as 'fn'. If set
1413 1413 to None, patches will always be upgraded to git format when
1414 1414 necessary.
1415 1415 '''
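# Hedged usage sketch (not in the original source): callers typically write
#   for chunk in diff(repo, node1, node2, match=m, opts=diffopts(ui)):
#       ui.write(chunk)
# i.e. the generator yields ready-to-print diff text, headers included.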
1416 1416
1417 1417 if opts is None:
1418 1418 opts = mdiff.defaultopts
1419 1419
1420 1420 if not node1 and not node2:
1421 1421 node1 = repo.dirstate.parents()[0]
1422 1422
1423 1423 def lrugetfilectx():
1424 1424 cache = {}
1425 1425 order = []
1426 1426 def getfilectx(f, ctx):
1427 1427 fctx = ctx.filectx(f, filelog=cache.get(f))
1428 1428 if f not in cache:
1429 1429 if len(cache) > 20:
1430 1430 del cache[order.pop(0)]
1431 1431 cache[f] = fctx.filelog()
1432 1432 else:
1433 1433 order.remove(f)
1434 1434 order.append(f)
1435 1435 return fctx
1436 1436 return getfilectx
1437 1437 getfilectx = lrugetfilectx()
1438 1438
1439 1439 ctx1 = repo[node1]
1440 1440 ctx2 = repo[node2]
1441 1441
1442 1442 if not changes:
1443 1443 changes = repo.status(ctx1, ctx2, match=match)
1444 1444 modified, added, removed = changes[:3]
1445 1445
1446 1446 if not modified and not added and not removed:
1447 1447 return []
1448 1448
1449 1449 revs = None
1450 1450 if not repo.ui.quiet:
1451 1451 hexfunc = repo.ui.debugflag and hex or short
1452 1452 revs = [hexfunc(node) for node in [node1, node2] if node]
1453 1453
1454 1454 copy = {}
1455 1455 if opts.git or opts.upgrade:
1456 1456 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1457 1457
1458 1458 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1459 1459 modified, added, removed, copy, getfilectx, opts, losedata)
1460 1460 if opts.upgrade and not opts.git:
1461 1461 try:
1462 1462 def losedata(fn):
1463 1463 if not losedatafn or not losedatafn(fn=fn):
1464 1464 raise GitDiffRequired()
1465 1465 # Buffer the whole output until we are sure it can be generated
1466 1466 return list(difffn(opts.copy(git=False), losedata))
1467 1467 except GitDiffRequired:
1468 1468 return difffn(opts.copy(git=True), None)
1469 1469 else:
1470 1470 return difffn(opts, None)
1471 1471
1472 1472 def difflabel(func, *args, **kw):
1473 1473 '''yields 2-tuples of (output, label) based on the output of func()'''
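# For example (illustrative): a '+added text' line is yielded as
# ('+added text', 'diff.inserted'); trailing whitespace on a changed line is
# emitted separately with the 'diff.trailingwhitespace' label.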
1474 1474 prefixes = [('diff', 'diff.diffline'),
1475 1475 ('copy', 'diff.extended'),
1476 1476 ('rename', 'diff.extended'),
1477 1477 ('old', 'diff.extended'),
1478 1478 ('new', 'diff.extended'),
1479 1479 ('deleted', 'diff.extended'),
1480 1480 ('---', 'diff.file_a'),
1481 1481 ('+++', 'diff.file_b'),
1482 1482 ('@@', 'diff.hunk'),
1483 1483 ('-', 'diff.deleted'),
1484 1484 ('+', 'diff.inserted')]
1485 1485
1486 1486 for chunk in func(*args, **kw):
1487 1487 lines = chunk.split('\n')
1488 1488 for i, line in enumerate(lines):
1489 1489 if i != 0:
1490 1490 yield ('\n', '')
1491 1491 stripline = line
1492 1492 if line and line[0] in '+-':
1493 1493 # highlight trailing whitespace, but only in changed lines
1494 1494 stripline = line.rstrip()
1495 1495 for prefix, label in prefixes:
1496 1496 if stripline.startswith(prefix):
1497 1497 yield (stripline, label)
1498 1498 break
1499 1499 else:
1500 1500 yield (line, '')
1501 1501 if line != stripline:
1502 1502 yield (line[len(stripline):], 'diff.trailingwhitespace')
1503 1503
1504 1504 def diffui(*args, **kw):
1505 1505 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1506 1506 return difflabel(diff, *args, **kw)
1507 1507
1508 1508
1509 1509 def _addmodehdr(header, omode, nmode):
1510 1510 if omode != nmode:
1511 1511 header.append('old mode %s\n' % omode)
1512 1512 header.append('new mode %s\n' % nmode)
1513 1513
1514 1514 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1515 1515 copy, getfilectx, opts, losedatafn):
1516 1516
1517 1517 date1 = util.datestr(ctx1.date())
1518 1518 man1 = ctx1.manifest()
1519 1519
1520 1520 gone = set()
1521 1521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1522 1522
1523 1523 copyto = dict([(v, k) for k, v in copy.items()])
1524 1524
1525 1525 if opts.git:
1526 1526 revs = None
1527 1527
1528 1528 for f in sorted(modified + added + removed):
1529 1529 to = None
1530 1530 tn = None
1531 1531 dodiff = True
1532 1532 header = []
1533 1533 if f in man1:
1534 1534 to = getfilectx(f, ctx1).data()
1535 1535 if f not in removed:
1536 1536 tn = getfilectx(f, ctx2).data()
1537 1537 a, b = f, f
1538 1538 if opts.git or losedatafn:
1539 1539 if f in added:
1540 1540 mode = gitmode[ctx2.flags(f)]
1541 1541 if f in copy or f in copyto:
1542 1542 if opts.git:
1543 1543 if f in copy:
1544 1544 a = copy[f]
1545 1545 else:
1546 1546 a = copyto[f]
1547 1547 omode = gitmode[man1.flags(a)]
1548 1548 _addmodehdr(header, omode, mode)
1549 1549 if a in removed and a not in gone:
1550 1550 op = 'rename'
1551 1551 gone.add(a)
1552 1552 else:
1553 1553 op = 'copy'
1554 1554 header.append('%s from %s\n' % (op, a))
1555 1555 header.append('%s to %s\n' % (op, f))
1556 1556 to = getfilectx(a, ctx1).data()
1557 1557 else:
1558 1558 losedatafn(f)
1559 1559 else:
1560 1560 if opts.git:
1561 1561 header.append('new file mode %s\n' % mode)
1562 1562 elif ctx2.flags(f):
1563 1563 losedatafn(f)
1564 1564 if util.binary(tn):
1565 1565 if opts.git:
1566 1566 dodiff = 'binary'
1567 1567 else:
1568 1568 losedatafn(f)
1569 1569 if not opts.git and not tn:
1570 1570 # regular diffs cannot represent new empty file
1571 1571 losedatafn(f)
1572 1572 elif f in removed:
1573 1573 if opts.git:
1574 1574 # have we already reported a copy above?
1575 1575 if ((f in copy and copy[f] in added
1576 1576 and copyto[copy[f]] == f) or
1577 1577 (f in copyto and copyto[f] in added
1578 1578 and copy[copyto[f]] == f)):
1579 1579 dodiff = False
1580 1580 else:
1581 1581 header.append('deleted file mode %s\n' %
1582 1582 gitmode[man1.flags(f)])
1583 1583 elif not to:
1584 1584 # regular diffs cannot represent empty file deletion
1585 1585 losedatafn(f)
1586 1586 else:
1587 1587 oflag = man1.flags(f)
1588 1588 nflag = ctx2.flags(f)
1589 1589 binary = util.binary(to) or util.binary(tn)
1590 1590 if opts.git:
1591 1591 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1592 1592 if binary:
1593 1593 dodiff = 'binary'
1594 1594 elif binary or nflag != oflag:
1595 1595 losedatafn(f)
1596 1596 if opts.git:
1597 1597 header.insert(0, mdiff.diffline(revs, a, b, opts))
1598 1598
1599 1599 if dodiff:
1600 1600 if dodiff == 'binary':
1601 1601 text = b85diff(to, tn)
1602 1602 else:
1603 1603 text = mdiff.unidiff(to, date1,
1604 1604 # ctx2 date may be dynamic
1605 1605 tn, util.datestr(ctx2.date()),
1606 1606 a, b, revs, opts=opts)
1607 1607 if header and (text or len(header) > 1):
1608 1608 yield ''.join(header)
1609 1609 if text:
1610 1610 yield text
1611 1611
1612 1612 def diffstatdata(lines):
1613 1613 filename, adds, removes = None, 0, 0
1614 1614 for line in lines:
1615 1615 if line.startswith('diff'):
1616 1616 if filename:
1617 1617 isbinary = adds == 0 and removes == 0
1618 1618 yield (filename, adds, removes, isbinary)
1619 1619 # set numbers to 0 anyway when starting new file
1620 1620 adds, removes = 0, 0
1621 1621 if line.startswith('diff --git'):
1622 1622 filename = gitre.search(line).group(1)
1623 1623 else:
1624 1624 # format: "diff -r ... -r ... filename"
1625 1625 filename = line.split(None, 5)[-1]
1626 1626 elif line.startswith('+') and not line.startswith('+++'):
1627 1627 adds += 1
1628 1628 elif line.startswith('-') and not line.startswith('---'):
1629 1629 removes += 1
1630 1630 if filename:
1631 1631 isbinary = adds == 0 and removes == 0
1632 1632 yield (filename, adds, removes, isbinary)
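# Illustrative example (not in the original source): feeding the lines
#   diff -r 0000000000aa -r 0000000000bb foo.c
#   --- a/foo.c
#   +++ b/foo.c
#   @@ -1,1 +1,2 @@
#    unchanged
#   +added
# through diffstatdata() yields ('foo.c', 1, 0, False).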
1633 1633
1634 1634 def diffstat(lines, width=80, git=False):
1635 1635 output = []
1636 1636 stats = list(diffstatdata(lines))
1637 1637
1638 1638 maxtotal, maxname = 0, 0
1639 1639 totaladds, totalremoves = 0, 0
1640 1640 hasbinary = False
1641 1641 for filename, adds, removes, isbinary in stats:
1642 1642 totaladds += adds
1643 1643 totalremoves += removes
1644 1644 maxname = max(maxname, len(filename))
1645 1645 maxtotal = max(maxtotal, adds + removes)
1646 1646 if isbinary:
1647 1647 hasbinary = True
1648 1648
1649 1649 countwidth = len(str(maxtotal))
1650 1650 if hasbinary and countwidth < 3:
1651 1651 countwidth = 3
1652 1652 graphwidth = width - countwidth - maxname - 6
1653 1653 if graphwidth < 10:
1654 1654 graphwidth = 10
1655 1655
1656 1656 def scale(i):
1657 1657 if maxtotal <= graphwidth:
1658 1658 return i
1659 1659 # If diffstat runs out of room it doesn't print anything,
1660 1660 # which isn't very useful, so always print at least one + or -
1661 1661 # if there were at least some changes.
1662 1662 return max(i * graphwidth // maxtotal, int(bool(i)))
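# Example (added for illustration): with maxtotal=200 and graphwidth=50,
# scale(100) == 25 while scale(1) == max(0, 1) == 1, so even a one-line
# change still draws a single '+' or '-'.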
1663 1663
1664 1664 for filename, adds, removes, isbinary in stats:
1665 1665 if git and isbinary:
1666 1666 count = 'Bin'
1667 1667 else:
1668 1668 count = adds + removes
1669 1669 pluses = '+' * scale(adds)
1670 1670 minuses = '-' * scale(removes)
1671 1671 output.append(' %-*s | %*s %s%s\n' % (maxname, filename, countwidth,
1672 1672 count, pluses, minuses))
1673 1673
1674 1674 if stats:
1675 1675 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1676 1676 % (len(stats), totaladds, totalremoves))
1677 1677
1678 1678 return ''.join(output)
1679 1679
1680 1680 def diffstatui(*args, **kw):
1681 1681 '''like diffstat(), but yields 2-tuples of (output, label) for
1682 1682 ui.write()
1683 1683 '''
1684 1684
1685 1685 for line in diffstat(*args, **kw).splitlines():
1686 1686 if line and line[-1] in '+-':
1687 1687 name, graph = line.rsplit(' ', 1)
1688 1688 yield (name + ' ', '')
1689 1689 m = re.search(r'\++', graph)
1690 1690 if m:
1691 1691 yield (m.group(0), 'diffstat.inserted')
1692 1692 m = re.search(r'-+', graph)
1693 1693 if m:
1694 1694 yield (m.group(0), 'diffstat.deleted')
1695 1695 else:
1696 1696 yield (line, '')
1697 1697 yield ('\n', '')
@@ -1,70 +1,69 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import util, error
9 from i18n import _
10 9
11 10 from mercurial import store
12 11
13 12 class StreamException(Exception):
14 13 def __init__(self, code):
15 14 Exception.__init__(self)
16 15 self.code = code
17 16 def __str__(self):
18 17 return '%i\n' % self.code
19 18
20 19 # if server supports streaming clone, it advertises "stream"
21 20 # capability with value that is version+flags of repo it is serving.
22 21 # client only streams if it can read that repo format.
23 22
24 23 # stream file format is simple.
25 24 #
26 25 # server writes out line that says how many files, how many total
27 26 # bytes. separator is ascii space, byte counts are strings.
28 27 #
29 28 # then for each file:
30 29 #
31 30 # server writes out line that says filename, how many bytes in
32 31 # file. separator is ascii nul, byte count is string.
33 32 #
34 33 # server writes out raw file data.
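# For illustration (comment added, not part of the original): a stream for two
# files might look like
#   "2 8192\n"
#   "data/foo.i" NUL "4096\n"  followed by 4096 raw bytes
#   "data/bar.i" NUL "4096\n"  followed by 4096 raw bytes
# stream_out() below additionally sends a leading '0\n' status line.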
35 34
36 35 def allowed(ui):
37 36 return ui.configbool('server', 'uncompressed', True, untrusted=True)
38 37
39 38 def stream_out(repo):
40 39 '''stream out all metadata files in repository.
41 40 writes to file-like object, must support write() and optional flush().'''
42 41
43 42 if not allowed(repo.ui):
44 43 raise StreamException(1)
45 44
46 45 entries = []
47 46 total_bytes = 0
48 47 try:
49 48 # get consistent snapshot of repo, lock during scan
50 49 lock = repo.lock()
51 50 try:
52 51 repo.ui.debug('scanning\n')
53 52 for name, ename, size in repo.store.walk():
54 53 entries.append((name, size))
55 54 total_bytes += size
56 55 finally:
57 56 lock.release()
58 57 except error.LockError:
59 58 raise StreamException(2)
60 59
61 60 yield '0\n'
62 61 repo.ui.debug('%d files, %d bytes to transfer\n' %
63 62 (len(entries), total_bytes))
64 63 yield '%d %d\n' % (len(entries), total_bytes)
65 64 for name, size in entries:
66 65 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
67 66 # partially encode name over the wire for backwards compat
68 67 yield '%s\0%d\n' % (store.encodedir(name), size)
69 68 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
70 69 yield chunk
@@ -1,314 +1,313 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # This is the mercurial setup script.
4 4 #
5 5 # 'python setup.py install', or
6 6 # 'python setup.py --help' for more options
7 7
8 8 import sys
9 9 if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'):
10 10 raise SystemExit("Mercurial requires Python 2.4 or later.")
11 11
12 12 # Solaris Python packaging brain damage
13 13 try:
14 14 import hashlib
15 15 sha = hashlib.sha1()
16 16 except:
17 17 try:
18 18 import sha
19 19 except:
20 20 raise SystemExit(
21 21 "Couldn't import standard hashlib (incomplete Python install).")
22 22
23 23 try:
24 24 import zlib
25 25 except:
26 26 raise SystemExit(
27 27 "Couldn't import standard zlib (incomplete Python install).")
28 28
29 29 try:
30 30 import bz2
31 31 except:
32 32 raise SystemExit(
33 33 "Couldn't import standard bz2 (incomplete Python install).")
34 34
35 35 import os, subprocess, time
36 36 import shutil
37 37 import tempfile
38 38 from distutils.core import setup, Extension
39 39 from distutils.dist import Distribution
40 from distutils.command.install_data import install_data
41 40 from distutils.command.build import build
42 41 from distutils.command.build_py import build_py
43 42 from distutils.spawn import spawn, find_executable
44 43 from distutils.ccompiler import new_compiler
45 44
46 45 scripts = ['hg']
47 46 if os.name == 'nt':
48 47 scripts.append('contrib/win32/hg.bat')
49 48
50 49 # simplified version of distutils.ccompiler.CCompiler.has_function
51 50 # that actually removes its temporary files.
52 51 def hasfunction(cc, funcname):
53 52 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
54 53 devnull = oldstderr = None
55 54 try:
56 55 try:
57 56 fname = os.path.join(tmpdir, 'funcname.c')
58 57 f = open(fname, 'w')
59 58 f.write('int main(void) {\n')
60 59 f.write(' %s();\n' % funcname)
61 60 f.write('}\n')
62 61 f.close()
63 62 # Redirect stderr to /dev/null to hide any error messages
64 63 # from the compiler.
65 64 # This will have to be changed if we ever have to check
66 65 # for a function on Windows.
67 66 devnull = open('/dev/null', 'w')
68 67 oldstderr = os.dup(sys.stderr.fileno())
69 68 os.dup2(devnull.fileno(), sys.stderr.fileno())
70 69 objects = cc.compile([fname], output_dir=tmpdir)
71 70 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
72 71 except:
73 72 return False
74 73 return True
75 74 finally:
76 75 if oldstderr is not None:
77 76 os.dup2(oldstderr, sys.stderr.fileno())
78 77 if devnull is not None:
79 78 devnull.close()
80 79 shutil.rmtree(tmpdir)
81 80
82 81 # py2exe needs to be installed to work
83 82 try:
84 83 import py2exe
85 84 py2exeloaded = True
86 85
87 86 # Help py2exe to find win32com.shell
88 87 try:
89 88 import modulefinder
90 89 import win32com
91 90 for p in win32com.__path__[1:]: # Take the path to win32comext
92 91 modulefinder.AddPackagePath("win32com", p)
93 92 pn = "win32com.shell"
94 93 __import__(pn)
95 94 m = sys.modules[pn]
96 95 for p in m.__path__[1:]:
97 96 modulefinder.AddPackagePath(pn, p)
98 97 except ImportError:
99 98 pass
100 99
101 100 except ImportError:
102 101 py2exeloaded = False
103 102 pass
104 103
105 104 def runcmd(cmd, env):
106 105 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
107 106 stderr=subprocess.PIPE, env=env)
108 107 out, err = p.communicate()
109 108 # If root is executing setup.py, but the repository is owned by
110 109 # another user (as in "sudo python setup.py install") we will get
111 110 # trust warnings since the .hg/hgrc file is untrusted. That is
112 111 # fine, we don't want to load it anyway. Python may warn about
113 112 # a missing __init__.py in mercurial/locale, we also ignore that.
114 113 err = [e for e in err.splitlines()
115 114 if not e.startswith('Not trusting file') \
116 115 and not e.startswith('warning: Not importing')]
117 116 if err:
118 117 return ''
119 118 return out
120 119
121 120 version = ''
122 121
123 122 if os.path.isdir('.hg'):
124 123 # Execute hg out of this directory with a custom environment which
125 124 # includes the pure Python modules in mercurial/pure. We also take
126 125 # care to not use any hgrc files and do no localization.
127 126 pypath = ['mercurial', os.path.join('mercurial', 'pure')]
128 127 env = {'PYTHONPATH': os.pathsep.join(pypath),
129 128 'HGRCPATH': '',
130 129 'LANGUAGE': 'C'}
131 130 if 'LD_LIBRARY_PATH' in os.environ:
132 131 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
133 132 if 'SystemRoot' in os.environ:
134 133 # Copy SystemRoot into the custom environment for Python 2.6
135 134 # under Windows. Otherwise, the subprocess will fail with
136 135 # error 0xc0150004. See: http://bugs.python.org/issue3440
137 136 env['SystemRoot'] = os.environ['SystemRoot']
138 137 cmd = [sys.executable, 'hg', 'id', '-i', '-t']
139 138 l = runcmd(cmd, env).split()
140 139 while len(l) > 1 and l[-1][0].isalpha(): # remove non-numbered tags
141 140 l.pop()
142 141 if len(l) > 1: # tag found
143 142 version = l[-1]
144 143 if l[0].endswith('+'): # propagate the dirty status to the tag
145 144 version += '+'
146 145 elif len(l) == 1: # no tag found
147 146 cmd = [sys.executable, 'hg', 'parents', '--template',
148 147 '{latesttag}+{latesttagdistance}-']
149 148 version = runcmd(cmd, env) + l[0]
150 149 if version.endswith('+'):
151 150 version += time.strftime('%Y%m%d')
152 151 elif os.path.exists('.hg_archival.txt'):
153 152 kw = dict([[t.strip() for t in l.split(':', 1)]
154 153 for l in open('.hg_archival.txt')])
155 154 if 'tag' in kw:
156 155 version = kw['tag']
157 156 elif 'latesttag' in kw:
158 157 version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
159 158 else:
160 159 version = kw.get('node', '')[:12]
161 160
162 161 if version:
163 162 f = open("mercurial/__version__.py", "w")
164 163 f.write('# this file is autogenerated by setup.py\n')
165 164 f.write('version = "%s"\n' % version)
166 165 f.close()
167 166
168 167
169 168 try:
170 169 from mercurial import __version__
171 170 version = __version__.version
172 171 except ImportError:
173 172 version = 'unknown'
174 173
175 174 class hgbuildmo(build):
176 175
177 176 description = "build translations (.mo files)"
178 177
179 178 def run(self):
180 179 if not find_executable('msgfmt'):
181 180 self.warn("could not find msgfmt executable, no translations "
182 181 "will be built")
183 182 return
184 183
185 184 podir = 'i18n'
186 185 if not os.path.isdir(podir):
187 186 self.warn("could not find %s/ directory" % podir)
188 187 return
189 188
190 189 join = os.path.join
191 190 for po in os.listdir(podir):
192 191 if not po.endswith('.po'):
193 192 continue
194 193 pofile = join(podir, po)
195 194 modir = join('locale', po[:-3], 'LC_MESSAGES')
196 195 mofile = join(modir, 'hg.mo')
197 196 mobuildfile = join('mercurial', mofile)
198 197 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
199 198 if sys.platform != 'sunos5':
200 199 # msgfmt on Solaris does not know about -c
201 200 cmd.append('-c')
202 201 self.mkpath(join('mercurial', modir))
203 202 self.make_file([pofile], mobuildfile, spawn, (cmd,))
204 203
205 204 # Insert hgbuildmo first so that files in mercurial/locale/ are found
206 205 # when build_py is run next.
207 206 build.sub_commands.insert(0, ('build_mo', None))
208 207
209 208 Distribution.pure = 0
210 209 Distribution.global_options.append(('pure', None, "use pure (slow) Python "
211 210 "code instead of C extensions"))
212 211
213 212 class hgbuildpy(build_py):
214 213
215 214 def finalize_options(self):
216 215 build_py.finalize_options(self)
217 216
218 217 if self.distribution.pure:
219 218 if self.py_modules is None:
220 219 self.py_modules = []
221 220 for ext in self.distribution.ext_modules:
222 221 if ext.name.startswith("mercurial."):
223 222 self.py_modules.append("mercurial.pure.%s" % ext.name[10:])
224 223 self.distribution.ext_modules = []
225 224
226 225 def find_modules(self):
227 226 modules = build_py.find_modules(self)
228 227 for module in modules:
229 228 if module[0] == "mercurial.pure":
230 229 if module[1] != "__init__":
231 230 yield ("mercurial", module[1], module[2])
232 231 else:
233 232 yield module
234 233
235 234 cmdclass = {'build_mo': hgbuildmo,
236 235 'build_py': hgbuildpy}
237 236
238 237 packages = ['mercurial', 'mercurial.hgweb', 'hgext', 'hgext.convert',
239 238 'hgext.highlight', 'hgext.zeroconf']
240 239
241 240 pymodules = []
242 241
243 242 extmodules = [
244 243 Extension('mercurial.base85', ['mercurial/base85.c']),
245 244 Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
246 245 Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
247 246 Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
248 247 Extension('mercurial.parsers', ['mercurial/parsers.c']),
249 248 ]
250 249
251 250 # disable osutil.c under windows + python 2.4 (issue1364)
252 251 if sys.platform == 'win32' and sys.version_info < (2, 5, 0, 'final'):
253 252 pymodules.append('mercurial.pure.osutil')
254 253 else:
255 254 extmodules.append(Extension('mercurial.osutil', ['mercurial/osutil.c']))
256 255
257 256 if sys.platform == 'linux2' and os.uname()[2] > '2.6':
258 257 # The inotify extension is only usable with Linux 2.6 kernels.
259 258 # You also need a reasonably recent C library.
260 259 cc = new_compiler()
261 260 if hasfunction(cc, 'inotify_add_watch'):
262 261 extmodules.append(Extension('hgext.inotify.linux._inotify',
263 262 ['hgext/inotify/linux/_inotify.c']))
264 263 packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
265 264
266 265 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
267 266 'help/*.txt']}
268 267
269 268 def ordinarypath(p):
270 269 return p and p[0] != '.' and p[-1] != '~'
271 270
272 271 for root in ('templates',):
273 272 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
274 273 curdir = curdir.split(os.sep, 1)[1]
275 274 dirs[:] = filter(ordinarypath, dirs)
276 275 for f in filter(ordinarypath, files):
277 276 f = os.path.join(curdir, f)
278 277 packagedata['mercurial'].append(f)
279 278
280 279 datafiles = []
281 280 setupversion = version
282 281 extra = {}
283 282
284 283 if py2exeloaded:
285 284 extra['console'] = [
286 285 {'script':'hg',
287 286 'copyright':'Copyright (C) 2005-2010 Matt Mackall and others',
288 287 'product_version':version}]
289 288
290 289 if os.name == 'nt':
291 290 # Windows binary file versions for exe/dll files must have the
292 291 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
293 292 setupversion = version.split('+', 1)[0]
294 293
295 294 setup(name='mercurial',
296 295 version=setupversion,
297 296 author='Matt Mackall',
298 297 author_email='mpm@selenic.com',
299 298 url='http://mercurial.selenic.com/',
300 299 description='Scalable distributed SCM',
301 300 license='GNU GPLv2+',
302 301 scripts=scripts,
303 302 packages=packages,
304 303 py_modules=pymodules,
305 304 ext_modules=extmodules,
306 305 data_files=datafiles,
307 306 package_data=packagedata,
308 307 cmdclass=cmdclass,
309 308 options=dict(py2exe=dict(packages=['hgext', 'email']),
310 309 bdist_mpkg=dict(zipdist=True,
311 310 license='COPYING',
312 311 readme='contrib/macosx/Readme.html',
313 312 welcome='contrib/macosx/Welcome.html')),
314 313 **extra)
@@ -1,29 +1,29 b''
1 1 #!/usr/bin/env python
2 2
3 3 """This does HTTP GET requests given a host:port and path and returns
4 4 a subset of the headers plus the body of the result."""
5 5
6 import httplib, sys, re
6 import httplib, sys
7 7
8 8 try:
9 9 import msvcrt, os
10 10 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
11 11 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
12 12 except ImportError:
13 13 pass
14 14
15 15 headers = [h.lower() for h in sys.argv[3:]]
16 16 conn = httplib.HTTPConnection(sys.argv[1])
17 17 conn.request("GET", sys.argv[2])
18 18 response = conn.getresponse()
19 19 print response.status, response.reason
20 20 for h in headers:
21 21 if response.getheader(h, None) is not None:
22 22 print "%s: %s" % (h, response.getheader(h))
23 23 print
24 24 data = response.read()
25 25 sys.stdout.write(data)
26 26
27 27 if 200 <= response.status <= 299:
28 28 sys.exit(0)
29 29 sys.exit(1)
@@ -1,25 +1,25 b''
1 1 #!/usr/bin/env python
2 2
3 import os, sys, time, errno, signal
3 import os, time, errno, signal
4 4
5 5 # Kill off any leftover daemon processes
6 6 try:
7 7 fp = open(os.environ['DAEMON_PIDS'])
8 8 for line in fp:
9 9 try:
10 10 pid = int(line)
11 11 except ValueError:
12 12 continue
13 13 try:
14 14 os.kill(pid, 0)
15 15 os.kill(pid, signal.SIGTERM)
16 16 for i in range(10):
17 17 time.sleep(0.05)
18 18 os.kill(pid, 0)
19 19 os.kill(pid, signal.SIGKILL)
20 20 except OSError, err:
21 21 if err.errno != errno.ESRCH:
22 22 raise
23 23 fp.close()
24 24 except IOError:
25 25 pass
@@ -1,957 +1,956 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 #
39 39 # (You could use any subset of the tests: test-s* happens to match
40 40 # enough that it's worth doing parallel runs, few enough that it
41 41 # completes fairly quickly, includes both shell and Python scripts, and
42 42 # includes some scripts that run daemon processes.)
43 43
44 44 from distutils import version
45 45 import difflib
46 46 import errno
47 47 import optparse
48 48 import os
49 import signal
49 import shutil
50 50 import subprocess
51 import shutil
52 51 import signal
53 52 import sys
54 53 import tempfile
55 54 import time
56 55
57 56 closefds = os.name == 'posix'
58 57 def Popen4(cmd, bufsize=-1):
59 58 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
60 59 close_fds=closefds,
61 60 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 61 stderr=subprocess.STDOUT)
63 62 p.fromchild = p.stdout
64 63 p.tochild = p.stdin
65 64 p.childerr = p.stderr
66 65 return p
67 66
68 67 # reserved exit code to skip test (used by hghave)
69 68 SKIPPED_STATUS = 80
70 69 SKIPPED_PREFIX = 'skipped: '
71 70 FAILED_PREFIX = 'hghave check failed: '
72 71 PYTHON = sys.executable
73 72 IMPL_PATH = 'PYTHONPATH'
74 73 if 'java' in sys.platform:
75 74 IMPL_PATH = 'JYTHONPATH'
76 75
77 76 requiredtools = ["python", "diff", "grep", "unzip", "gunzip", "bunzip2", "sed"]
78 77
79 78 defaults = {
80 79 'jobs': ('HGTEST_JOBS', 1),
81 80 'timeout': ('HGTEST_TIMEOUT', 180),
82 81 'port': ('HGTEST_PORT', 20059),
83 82 }
84 83
85 84 def parseargs():
86 85 parser = optparse.OptionParser("%prog [options] [tests]")
87 86 parser.add_option("-C", "--annotate", action="store_true",
88 87 help="output files annotated with coverage")
89 88 parser.add_option("--child", type="int",
90 89 help="run as child process, summary to given fd")
91 90 parser.add_option("-c", "--cover", action="store_true",
92 91 help="print a test coverage report")
93 92 parser.add_option("-f", "--first", action="store_true",
94 93 help="exit on the first test failure")
95 94 parser.add_option("-i", "--interactive", action="store_true",
96 95 help="prompt to accept changed output")
97 96 parser.add_option("-j", "--jobs", type="int",
98 97 help="number of jobs to run in parallel"
99 98 " (default: $%s or %d)" % defaults['jobs'])
100 99 parser.add_option("-k", "--keywords",
101 100 help="run tests matching keywords")
102 101 parser.add_option("--keep-tmpdir", action="store_true",
103 102 help="keep temporary directory after running tests")
104 103 parser.add_option("--tmpdir", type="string",
105 104 help="run tests in the given temporary directory"
106 105 " (implies --keep-tmpdir)")
107 106 parser.add_option("-d", "--debug", action="store_true",
108 107 help="debug mode: write output of test scripts to console"
109 108 " rather than capturing and diff'ing it (disables timeout)")
110 109 parser.add_option("-R", "--restart", action="store_true",
111 110 help="restart at last error")
112 111 parser.add_option("-p", "--port", type="int",
113 112 help="port on which servers should listen"
114 113 " (default: $%s or %d)" % defaults['port'])
115 114 parser.add_option("-r", "--retest", action="store_true",
116 115 help="retest failed tests")
117 116 parser.add_option("-S", "--noskips", action="store_true",
118 117 help="don't report skip tests verbosely")
119 118 parser.add_option("-t", "--timeout", type="int",
120 119 help="kill errant tests after TIMEOUT seconds"
121 120 " (default: $%s or %d)" % defaults['timeout'])
122 121 parser.add_option("-v", "--verbose", action="store_true",
123 122 help="output verbose messages")
124 123 parser.add_option("-n", "--nodiff", action="store_true",
125 124 help="skip showing test changes")
126 125 parser.add_option("--with-hg", type="string",
127 126 metavar="HG",
128 127 help="test using specified hg script rather than a "
129 128 "temporary installation")
130 129 parser.add_option("--local", action="store_true",
131 130 help="shortcut for --with-hg=<testdir>/../hg")
132 131 parser.add_option("--pure", action="store_true",
133 132 help="use pure Python code instead of C extensions")
134 133 parser.add_option("-3", "--py3k-warnings", action="store_true",
135 134 help="enable Py3k warnings on Python 2.6+")
136 135 parser.add_option("--inotify", action="store_true",
137 136 help="enable inotify extension when running tests")
138 137 parser.add_option("--blacklist", action="append",
139 138 help="skip tests listed in the specified blacklist file")
140 139
141 140 for option, default in defaults.items():
142 141 defaults[option] = int(os.environ.get(*default))
143 142 parser.set_defaults(**defaults)
144 143 (options, args) = parser.parse_args()
145 144
146 145 # jython is always pure
147 146 if 'java' in sys.platform or '__pypy__' in sys.modules:
148 147 options.pure = True
149 148
150 149 if options.with_hg:
151 150 if not (os.path.isfile(options.with_hg) and
152 151 os.access(options.with_hg, os.X_OK)):
153 152 parser.error('--with-hg must specify an executable hg script')
154 153 if not os.path.basename(options.with_hg) == 'hg':
155 154 sys.stderr.write('warning: --with-hg should specify an hg script')
156 155 if options.local:
157 156 testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
158 157 hgbin = os.path.join(os.path.dirname(testdir), 'hg')
159 158 if not os.access(hgbin, os.X_OK):
160 159 parser.error('--local specified, but %r not found or not executable'
161 160 % hgbin)
162 161 options.with_hg = hgbin
163 162
164 163 options.anycoverage = options.cover or options.annotate
165 164 if options.anycoverage:
166 165 try:
167 166 import coverage
168 167 covver = version.StrictVersion(coverage.__version__).version
169 168 if covver < (3, 3):
170 169 parser.error('coverage options require coverage 3.3 or later')
171 170 except ImportError:
172 171 parser.error('coverage options now require the coverage package')
173 172
174 173 if options.anycoverage and options.local:
175 174 # this needs some path mangling somewhere, I guess
176 175 parser.error("sorry, coverage options do not work when --local "
177 176 "is specified")
178 177
179 178 global vlog
180 179 if options.verbose:
181 180 if options.jobs > 1 or options.child is not None:
182 181 pid = "[%d]" % os.getpid()
183 182 else:
184 183 pid = None
185 184 def vlog(*msg):
186 185 if pid:
187 186 print pid,
188 187 for m in msg:
189 188 print m,
190 189 print
191 190 sys.stdout.flush()
192 191 else:
193 192 vlog = lambda *msg: None
194 193
195 194 if options.tmpdir:
196 195 options.tmpdir = os.path.expanduser(options.tmpdir)
197 196
198 197 if options.jobs < 1:
199 198 parser.error('--jobs must be positive')
200 199 if options.interactive and options.jobs > 1:
201 200 print '(--interactive overrides --jobs)'
202 201 options.jobs = 1
203 202 if options.interactive and options.debug:
204 203 parser.error("-i/--interactive and -d/--debug are incompatible")
205 204 if options.debug:
206 205 if options.timeout != defaults['timeout']:
207 206 sys.stderr.write(
208 207 'warning: --timeout option ignored with --debug\n')
209 208 options.timeout = 0
210 209 if options.py3k_warnings:
211 210 if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
212 211 parser.error('--py3k-warnings can only be used on Python 2.6+')
213 212 if options.blacklist:
214 213 blacklist = dict()
215 214 for filename in options.blacklist:
216 215 try:
217 216 path = os.path.expanduser(os.path.expandvars(filename))
218 217 f = open(path, "r")
219 218 except IOError, err:
220 219 if err.errno != errno.ENOENT:
221 220 raise
222 221 print "warning: no such blacklist file: %s" % filename
223 222 continue
224 223
225 224 for line in f.readlines():
226 225 line = line.strip()
227 226 if line and not line.startswith('#'):
228 227 blacklist[line] = filename
229 228
230 229 options.blacklist = blacklist
231 230
232 231 return (options, args)
233 232
234 233 def rename(src, dst):
235 234 """Like os.rename(), trade atomicity and opened files friendliness
236 235 for existing destination support.
237 236 """
238 237 shutil.copy(src, dst)
239 238 os.remove(src)
240 239
241 240 def splitnewlines(text):
242 241 '''like str.splitlines, but only split on newlines.
243 242 keep line endings.'''
244 243 i = 0
245 244 lines = []
246 245 while True:
247 246 n = text.find('\n', i)
248 247 if n == -1:
249 248 last = text[i:]
250 249 if last:
251 250 lines.append(last)
252 251 return lines
253 252 lines.append(text[i:n + 1])
254 253 i = n + 1
255 254
256 255 def parsehghaveoutput(lines):
257 256 '''Parse hghave log lines.
258 257 Return tuple of lists (missing, failed):
259 258 * the missing/unknown features
260 259 * the features for which existence check failed'''
261 260 missing = []
262 261 failed = []
263 262 for line in lines:
264 263 if line.startswith(SKIPPED_PREFIX):
265 264 line = line.splitlines()[0]
266 265 missing.append(line[len(SKIPPED_PREFIX):])
267 266 elif line.startswith(FAILED_PREFIX):
268 267 line = line.splitlines()[0]
269 268 failed.append(line[len(FAILED_PREFIX):])
270 269
271 270 return missing, failed
272 271
273 272 def showdiff(expected, output, ref, err):
274 273 for line in difflib.unified_diff(expected, output, ref, err):
275 274 sys.stdout.write(line)
276 275
277 276 def findprogram(program):
278 277 """Search PATH for a executable program"""
279 278 for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
280 279 name = os.path.join(p, program)
281 280 if os.access(name, os.X_OK):
282 281 return name
283 282 return None
284 283
285 284 def checktools():
286 285 # Before we go any further, check for pre-requisite tools
287 286 # stuff from coreutils (cat, rm, etc) is not tested
288 287 for p in requiredtools:
289 288 if os.name == 'nt':
290 289 p += '.exe'
291 290 found = findprogram(p)
292 291 if found:
293 292 vlog("# Found prerequisite", p, "at", found)
294 293 else:
295 294 print "WARNING: Did not find prerequisite tool: "+p
296 295
297 296 def killdaemons():
298 297 # Kill off any leftover daemon processes
299 298 try:
300 299 fp = open(DAEMON_PIDS)
301 300 for line in fp:
302 301 try:
303 302 pid = int(line)
304 303 except ValueError:
305 304 continue
306 305 try:
307 306 os.kill(pid, 0)
308 307 vlog('# Killing daemon process %d' % pid)
309 308 os.kill(pid, signal.SIGTERM)
310 309 time.sleep(0.25)
311 310 os.kill(pid, 0)
312 311 vlog('# Daemon process %d is stuck - really killing it' % pid)
313 312 os.kill(pid, signal.SIGKILL)
314 313 except OSError, err:
315 314 if err.errno != errno.ESRCH:
316 315 raise
317 316 fp.close()
318 317 os.unlink(DAEMON_PIDS)
319 318 except IOError:
320 319 pass
321 320
322 321 def cleanup(options):
323 322 if not options.keep_tmpdir:
324 323 vlog("# Cleaning up HGTMP", HGTMP)
325 324 shutil.rmtree(HGTMP, True)
326 325
327 326 def usecorrectpython():
328 327 # some tests run python interpreter. they must use same
329 328 # interpreter we use or bad things will happen.
330 329 exedir, exename = os.path.split(sys.executable)
331 330 if exename == 'python':
332 331 path = findprogram('python')
333 332 if os.path.dirname(path) == exedir:
334 333 return
335 334 vlog('# Making python executable in test path use correct Python')
336 335 mypython = os.path.join(BINDIR, 'python')
337 336 try:
338 337 os.symlink(sys.executable, mypython)
339 338 except AttributeError:
340 339 # windows fallback
341 340 shutil.copyfile(sys.executable, mypython)
342 341 shutil.copymode(sys.executable, mypython)
343 342
344 343 def installhg(options):
345 344 vlog("# Performing temporary installation of HG")
346 345 installerrs = os.path.join("tests", "install.err")
347 346 pure = options.pure and "--pure" or ""
348 347
349 348 # Run installer in hg root
350 349 script = os.path.realpath(sys.argv[0])
351 350 hgroot = os.path.dirname(os.path.dirname(script))
352 351 os.chdir(hgroot)
353 352 nohome = '--home=""'
354 353 if os.name == 'nt':
355 354 # The --home="" trick works only on OS where os.sep == '/'
356 355 # because of a distutils convert_path() fast-path. Avoid it at
357 356 # least on Windows for now, deal with .pydistutils.cfg bugs
358 357 # when they happen.
359 358 nohome = ''
360 359 cmd = ('%s setup.py %s clean --all'
361 360 ' install --force --prefix="%s" --install-lib="%s"'
362 361 ' --install-scripts="%s" %s >%s 2>&1'
363 362 % (sys.executable, pure, INST, PYTHONDIR, BINDIR, nohome,
364 363 installerrs))
365 364 vlog("# Running", cmd)
366 365 if os.system(cmd) == 0:
367 366 if not options.verbose:
368 367 os.remove(installerrs)
369 368 else:
370 369 f = open(installerrs)
371 370 for line in f:
372 371 print line,
373 372 f.close()
374 373 sys.exit(1)
375 374 os.chdir(TESTDIR)
376 375
377 376 usecorrectpython()
378 377
379 378 vlog("# Installing dummy diffstat")
380 379 f = open(os.path.join(BINDIR, 'diffstat'), 'w')
381 380 f.write('#!' + sys.executable + '\n'
382 381 'import sys\n'
383 382 'files = 0\n'
384 383 'for line in sys.stdin:\n'
385 384 ' if line.startswith("diff "):\n'
386 385 ' files += 1\n'
387 386 'sys.stdout.write("files patched: %d\\n" % files)\n')
388 387 f.close()
389 388 os.chmod(os.path.join(BINDIR, 'diffstat'), 0700)
390 389
391 390 if options.py3k_warnings and not options.anycoverage:
392 391 vlog("# Updating hg command to enable Py3k Warnings switch")
393 392 f = open(os.path.join(BINDIR, 'hg'), 'r')
394 393 lines = [line.rstrip() for line in f]
395 394 lines[0] += ' -3'
396 395 f.close()
397 396 f = open(os.path.join(BINDIR, 'hg'), 'w')
398 397 for line in lines:
399 398 f.write(line + '\n')
400 399 f.close()
401 400
402 401 if options.anycoverage:
403 402 custom = os.path.join(TESTDIR, 'sitecustomize.py')
404 403 target = os.path.join(PYTHONDIR, 'sitecustomize.py')
405 404 vlog('# Installing coverage trigger to %s' % target)
406 405 shutil.copyfile(custom, target)
407 406 rc = os.path.join(TESTDIR, '.coveragerc')
408 407 vlog('# Installing coverage rc to %s' % rc)
409 408 os.environ['COVERAGE_PROCESS_START'] = rc
410 409 fn = os.path.join(INST, '..', '.coverage')
411 410 os.environ['COVERAGE_FILE'] = fn
412 411
413 412 def outputcoverage(options):
414 413
415 414 vlog('# Producing coverage report')
416 415 os.chdir(PYTHONDIR)
417 416
418 417 def covrun(*args):
419 418 cmd = 'coverage %s' % ' '.join(args)
420 419 vlog('# Running: %s' % cmd)
421 420 os.system(cmd)
422 421
423 422 if options.child:
424 423 return
425 424
426 425 covrun('-c')
427 426 omit = ','.join([BINDIR, TESTDIR])
428 427 covrun('-i', '-r', '"--omit=%s"' % omit) # report
429 428 if options.annotate:
430 429 adir = os.path.join(TESTDIR, 'annotated')
431 430 if not os.path.isdir(adir):
432 431 os.mkdir(adir)
433 432 covrun('-i', '-a', '"--directory=%s"' % adir, '"--omit=%s"' % omit)
434 433
435 434 class Timeout(Exception):
436 435 pass
437 436
438 437 def alarmed(signum, frame):
439 438 raise Timeout
440 439
441 440 def run(cmd, options):
442 441 """Run command in a sub-process, capturing the output (stdout and stderr).
443 442 Return a tuple (exitcode, output). output is None in debug mode."""
444 443 # TODO: Use subprocess.Popen if we're running on Python 2.4
445 444 if options.debug:
446 445 proc = subprocess.Popen(cmd, shell=True)
447 446 ret = proc.wait()
448 447 return (ret, None)
449 448
450 449 if os.name == 'nt' or sys.platform.startswith('java'):
451 450 tochild, fromchild = os.popen4(cmd)
452 451 tochild.close()
453 452 output = fromchild.read()
454 453 ret = fromchild.close()
455 454 if ret == None:
456 455 ret = 0
457 456 else:
458 457 proc = Popen4(cmd)
459 458 def cleanup():
460 459 os.kill(proc.pid, signal.SIGTERM)
461 460 ret = proc.wait()
462 461 if ret == 0:
463 462 ret = signal.SIGTERM << 8
464 463 killdaemons()
465 464 return ret
466 465
467 466 try:
468 467 output = ''
469 468 proc.tochild.close()
470 469 output = proc.fromchild.read()
471 470 ret = proc.wait()
472 471 if os.WIFEXITED(ret):
473 472 ret = os.WEXITSTATUS(ret)
474 473 except Timeout:
475 474 vlog('# Process %d timed out - killing it' % proc.pid)
476 475 ret = cleanup()
477 476 output += ("\n### Abort: timeout after %d seconds.\n"
478 477 % options.timeout)
479 478 except KeyboardInterrupt:
480 479 vlog('# Handling keyboard interrupt')
481 480 cleanup()
482 481 raise
483 482
484 483 return ret, splitnewlines(output)
485 484
486 485 def runone(options, test, skips, fails):
487 486 '''tristate output:
488 487 None -> skipped
489 488 True -> passed
490 489 False -> failed'''
491 490
492 491 def skip(msg):
493 492 if not options.verbose:
494 493 skips.append((test, msg))
495 494 else:
496 495 print "\nSkipping %s: %s" % (testpath, msg)
497 496 return None
498 497
499 498 def fail(msg):
500 499 fails.append((test, msg))
501 500 if not options.nodiff:
502 501 print "\nERROR: %s %s" % (testpath, msg)
503 502 return None
504 503
505 504 vlog("# Test", test)
506 505
507 506 # create a fresh hgrc
508 507 hgrc = open(HGRCPATH, 'w+')
509 508 hgrc.write('[ui]\n')
510 509 hgrc.write('slash = True\n')
511 510 hgrc.write('[defaults]\n')
512 511 hgrc.write('backout = -d "0 0"\n')
513 512 hgrc.write('commit = -d "0 0"\n')
514 513 hgrc.write('tag = -d "0 0"\n')
515 514 if options.inotify:
516 515 hgrc.write('[extensions]\n')
517 516 hgrc.write('inotify=\n')
518 517 hgrc.write('[inotify]\n')
519 518 hgrc.write('pidfile=%s\n' % DAEMON_PIDS)
520 519 hgrc.write('appendpid=True\n')
521 520 hgrc.close()
522 521
523 522 testpath = os.path.join(TESTDIR, test)
524 523 ref = os.path.join(TESTDIR, test+".out")
525 524 err = os.path.join(TESTDIR, test+".err")
526 525 if os.path.exists(err):
527 526 os.remove(err) # Remove any previous output files
528 527 try:
529 528 tf = open(testpath)
530 529 firstline = tf.readline().rstrip()
531 530 tf.close()
532 531 except:
533 532 firstline = ''
534 533 lctest = test.lower()
535 534
536 535 if lctest.endswith('.py') or firstline == '#!/usr/bin/env python':
537 536 py3kswitch = options.py3k_warnings and ' -3' or ''
538 537 cmd = '%s%s "%s"' % (PYTHON, py3kswitch, testpath)
539 538 elif lctest.endswith('.bat'):
540 539 # do not run batch scripts on non-windows
541 540 if os.name != 'nt':
542 541 return skip("batch script")
543 542 # To reliably get the error code from batch files on WinXP,
544 543 # the "cmd /c call" prefix is needed. Grrr
545 544 cmd = 'cmd /c call "%s"' % testpath
546 545 else:
547 546 # do not run shell scripts on windows
548 547 if os.name == 'nt':
549 548 return skip("shell script")
550 549 # do not try to run non-executable programs
551 550 if not os.path.exists(testpath):
552 551 return fail("does not exist")
553 552 elif not os.access(testpath, os.X_OK):
554 553 return skip("not executable")
555 554 cmd = '"%s"' % testpath
556 555
557 556 # Make a tmp subdirectory to work in
558 557 tmpd = os.path.join(HGTMP, test)
559 558 os.mkdir(tmpd)
560 559 os.chdir(tmpd)
561 560
562 561 if options.timeout > 0:
563 562 signal.alarm(options.timeout)
564 563
565 564 vlog("# Running", cmd)
566 565 ret, out = run(cmd, options)
567 566 vlog("# Ret was:", ret)
568 567
569 568 if options.timeout > 0:
570 569 signal.alarm(0)
571 570
572 571 mark = '.'
573 572
574 573 skipped = (ret == SKIPPED_STATUS)
575 574 # If we're not in --debug mode and reference output file exists,
576 575 # check test output against it.
577 576 if options.debug:
578 577 refout = None # to match out == None
579 578 elif os.path.exists(ref):
580 579 f = open(ref, "r")
581 580 refout = splitnewlines(f.read())
582 581 f.close()
583 582 else:
584 583 refout = []
585 584
586 585 if skipped:
587 586 mark = 's'
588 587 if out is None: # debug mode: nothing to parse
589 588 missing = ['unknown']
590 589 failed = None
591 590 else:
592 591 missing, failed = parsehghaveoutput(out)
593 592 if not missing:
594 593 missing = ['irrelevant']
595 594 if failed:
596 595 fail("hghave failed checking for %s" % failed[-1])
597 596 skipped = False
598 597 else:
599 598 skip(missing[-1])
600 599 elif out != refout:
601 600 mark = '!'
602 601 if ret:
603 602 fail("output changed and returned error code %d" % ret)
604 603 else:
605 604 fail("output changed")
606 605 if not options.nodiff:
607 606 showdiff(refout, out, ref, err)
608 607 ret = 1
609 608 elif ret:
610 609 mark = '!'
611 610 fail("returned error code %d" % ret)
612 611
613 612 if not options.verbose:
614 613 sys.stdout.write(mark)
615 614 sys.stdout.flush()
616 615
617 616 if ret != 0 and not skipped and not options.debug:
618 617 # Save errors to a file for diagnosis
619 618 f = open(err, "wb")
620 619 for line in out:
621 620 f.write(line)
622 621 f.close()
623 622
624 623 killdaemons()
625 624
626 625 os.chdir(TESTDIR)
627 626 if not options.keep_tmpdir:
628 627 shutil.rmtree(tmpd, True)
629 628 if skipped:
630 629 return None
631 630 return ret == 0
632 631
633 632 _hgpath = None
634 633
635 634 def _gethgpath():
636 635 """Return the path to the mercurial package that is actually found by
637 636 the current Python interpreter."""
638 637 global _hgpath
639 638 if _hgpath is not None:
640 639 return _hgpath
641 640
642 641 cmd = '%s -c "import mercurial; print mercurial.__path__[0]"'
643 642 pipe = os.popen(cmd % PYTHON)
644 643 try:
645 644 _hgpath = pipe.read().strip()
646 645 finally:
647 646 pipe.close()
648 647 return _hgpath
649 648
650 649 def _checkhglib(verb):
651 650 """Ensure that the 'mercurial' package imported by python is
652 651 the one we expect it to be. If not, print a warning to stderr."""
653 652 expecthg = os.path.join(PYTHONDIR, 'mercurial')
654 653 actualhg = _gethgpath()
655 654 if actualhg != expecthg:
656 655 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
657 656 ' (expected %s)\n'
658 657 % (verb, actualhg, expecthg))
659 658
660 659 def runchildren(options, tests):
661 660 if INST:
662 661 installhg(options)
663 662 _checkhglib("Testing")
664 663
665 664 optcopy = dict(options.__dict__)
666 665 optcopy['jobs'] = 1
667 666 if optcopy['with_hg'] is None:
668 667 optcopy['with_hg'] = os.path.join(BINDIR, "hg")
669 668 optcopy.pop('anycoverage', None)
670 669
671 670 opts = []
672 671 for opt, value in optcopy.iteritems():
673 672 name = '--' + opt.replace('_', '-')
674 673 if value is True:
675 674 opts.append(name)
676 675 elif value is not None:
677 676 opts.append(name + '=' + str(value))
678 677
679 678 tests.reverse()
680 679 jobs = [[] for j in xrange(options.jobs)]
681 680 while tests:
682 681 for job in jobs:
683 682 if not tests:
684 683 break
685 684 job.append(tests.pop())
686 685 fps = {}
687 686
688 687 for j, job in enumerate(jobs):
689 688 if not job:
690 689 continue
691 690 rfd, wfd = os.pipe()
692 691 childopts = ['--child=%d' % wfd, '--port=%d' % (options.port + j * 3)]
693 692 childtmp = os.path.join(HGTMP, 'child%d' % j)
694 693 childopts += ['--tmpdir', childtmp]
695 694 cmdline = [PYTHON, sys.argv[0]] + opts + childopts + job
696 695 vlog(' '.join(cmdline))
697 696 fps[os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)] = os.fdopen(rfd, 'r')
698 697 os.close(wfd)
699 698 signal.signal(signal.SIGINT, signal.SIG_IGN)
700 699 failures = 0
701 700 tested, skipped, failed = 0, 0, 0
702 701 skips = []
703 702 fails = []
704 703 while fps:
705 704 pid, status = os.wait()
706 705 fp = fps.pop(pid)
707 706 l = fp.read().splitlines()
708 707 try:
709 708 test, skip, fail = map(int, l[:3])
710 709 except ValueError:
711 710 test, skip, fail = 0, 0, 0
712 711 split = -fail or len(l)
713 712 for s in l[3:split]:
714 713 skips.append(s.split(" ", 1))
715 714 for s in l[split:]:
716 715 fails.append(s.split(" ", 1))
717 716 tested += test
718 717 skipped += skip
719 718 failed += fail
720 719 vlog('pid %d exited, status %d' % (pid, status))
721 720 failures |= status
722 721 print
723 722 if not options.noskips:
724 723 for s in skips:
725 724 print "Skipped %s: %s" % (s[0], s[1])
726 725 for s in fails:
727 726 print "Failed %s: %s" % (s[0], s[1])
728 727
729 728 _checkhglib("Tested")
730 729 print "# Ran %d tests, %d skipped, %d failed." % (
731 730 tested, skipped, failed)
732 731
733 732 if options.anycoverage:
734 733 outputcoverage(options)
735 734 sys.exit(failures != 0)
736 735
737 736 def runtests(options, tests):
738 737 global DAEMON_PIDS, HGRCPATH
739 738 DAEMON_PIDS = os.environ["DAEMON_PIDS"] = os.path.join(HGTMP, 'daemon.pids')
740 739 HGRCPATH = os.environ["HGRCPATH"] = os.path.join(HGTMP, '.hgrc')
741 740
742 741 try:
743 742 if INST:
744 743 installhg(options)
745 744 _checkhglib("Testing")
746 745
747 746 if options.timeout > 0:
748 747 try:
749 748 signal.signal(signal.SIGALRM, alarmed)
750 749 vlog('# Running each test with %d second timeout' %
751 750 options.timeout)
752 751 except AttributeError:
753 752 print 'WARNING: cannot run tests with timeouts'
754 753 options.timeout = 0
755 754
756 755 tested = 0
757 756 failed = 0
758 757 skipped = 0
759 758
760 759 if options.restart:
761 760 orig = list(tests)
762 761 while tests:
763 762 if os.path.exists(tests[0] + ".err"):
764 763 break
765 764 tests.pop(0)
766 765 if not tests:
767 766 print "running all tests"
768 767 tests = orig
769 768
770 769 skips = []
771 770 fails = []
772 771
773 772 for test in tests:
774 773 if options.blacklist:
775 774 filename = options.blacklist.get(test)
776 775 if filename is not None:
777 776 skips.append((test, "blacklisted (%s)" % filename))
778 777 skipped += 1
779 778 continue
780 779
781 780 if options.retest and not os.path.exists(test + ".err"):
782 781 skipped += 1
783 782 continue
784 783
785 784 if options.keywords:
786 785 t = open(test).read().lower() + test.lower()
787 786 for k in options.keywords.lower().split():
788 787 if k in t:
789 788 break
790 789 else:
791 790 skipped += 1
792 791 continue
793 792
794 793 ret = runone(options, test, skips, fails)
795 794 if ret is None:
796 795 skipped += 1
797 796 elif not ret:
798 797 if options.interactive:
799 798 print "Accept this change? [n] ",
800 799 answer = sys.stdin.readline().strip()
801 800 if answer.lower() in "y yes".split():
802 801 rename(test + ".err", test + ".out")
803 802 tested += 1
804 803 fails.pop()
805 804 continue
806 805 failed += 1
807 806 if options.first:
808 807 break
809 808 tested += 1
810 809
811 810 if options.child:
812 811 fp = os.fdopen(options.child, 'w')
813 812 fp.write('%d\n%d\n%d\n' % (tested, skipped, failed))
814 813 for s in skips:
815 814 fp.write("%s %s\n" % s)
816 815 for s in fails:
817 816 fp.write("%s %s\n" % s)
818 817 fp.close()
819 818 else:
820 819 print
821 820 for s in skips:
822 821 print "Skipped %s: %s" % s
823 822 for s in fails:
824 823 print "Failed %s: %s" % s
825 824 _checkhglib("Tested")
826 825 print "# Ran %d tests, %d skipped, %d failed." % (
827 826 tested, skipped, failed)
828 827
829 828 if options.anycoverage:
830 829 outputcoverage(options)
831 830 except KeyboardInterrupt:
832 831 failed = True
833 832 print "\ninterrupted!"
834 833
835 834 if failed:
836 835 sys.exit(1)
837 836
838 837 def main():
839 838 (options, args) = parseargs()
840 839 if not options.child:
841 840 os.umask(022)
842 841
843 842 checktools()
844 843
845 844 # Reset some environment variables to well-known values so that
846 845 # the tests produce repeatable output.
847 846 os.environ['LANG'] = os.environ['LC_ALL'] = os.environ['LANGUAGE'] = 'C'
848 847 os.environ['TZ'] = 'GMT'
849 848 os.environ["EMAIL"] = "Foo Bar <foo.bar@example.com>"
850 849 os.environ['CDPATH'] = ''
851 850 os.environ['COLUMNS'] = '80'
852 851 os.environ['GREP_OPTIONS'] = ''
853 852 os.environ['http_proxy'] = ''
854 853
855 854 # unset env related to hooks
856 855 for k in os.environ.keys():
857 856 if k.startswith('HG_'):
858 857 del os.environ[k]
859 858
860 859 global TESTDIR, HGTMP, INST, BINDIR, PYTHONDIR, COVERAGE_FILE
861 860 TESTDIR = os.environ["TESTDIR"] = os.getcwd()
862 861 if options.tmpdir:
863 862 options.keep_tmpdir = True
864 863 tmpdir = options.tmpdir
865 864 if os.path.exists(tmpdir):
866 865 # Meaning of tmpdir has changed since 1.3: we used to create
867 866 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
868 867 # tmpdir already exists.
869 868 sys.exit("error: temp dir %r already exists" % tmpdir)
870 869
871 870 # Automatically removing tmpdir sounds convenient, but could
872 871 # really annoy anyone in the habit of using "--tmpdir=/tmp"
873 872 # or "--tmpdir=$HOME".
874 873 #vlog("# Removing temp dir", tmpdir)
875 874 #shutil.rmtree(tmpdir)
876 875 os.makedirs(tmpdir)
877 876 else:
878 877 tmpdir = tempfile.mkdtemp('', 'hgtests.')
879 878 HGTMP = os.environ['HGTMP'] = os.path.realpath(tmpdir)
880 879 DAEMON_PIDS = None
881 880 HGRCPATH = None
882 881
883 882 os.environ["HGEDITOR"] = sys.executable + ' -c "import sys; sys.exit(0)"'
884 883 os.environ["HGMERGE"] = "internal:merge"
885 884 os.environ["HGUSER"] = "test"
886 885 os.environ["HGENCODING"] = "ascii"
887 886 os.environ["HGENCODINGMODE"] = "strict"
888 887 os.environ["HGPORT"] = str(options.port)
889 888 os.environ["HGPORT1"] = str(options.port + 1)
890 889 os.environ["HGPORT2"] = str(options.port + 2)
891 890
892 891 if options.with_hg:
893 892 INST = None
894 893 BINDIR = os.path.dirname(os.path.realpath(options.with_hg))
895 894
896 895 # This looks redundant with how Python initializes sys.path from
897 896 # the location of the script being executed. Needed because the
898 897 # "hg" specified by --with-hg is not the only Python script
899 898 # executed in the test suite that needs to import 'mercurial'
900 899 # ... which means it's not really redundant at all.
901 900 PYTHONDIR = BINDIR
902 901 else:
903 902 INST = os.path.join(HGTMP, "install")
904 903 BINDIR = os.environ["BINDIR"] = os.path.join(INST, "bin")
905 904 PYTHONDIR = os.path.join(INST, "lib", "python")
906 905
907 906 os.environ["BINDIR"] = BINDIR
908 907 os.environ["PYTHON"] = PYTHON
909 908
910 909 if not options.child:
911 910 path = [BINDIR] + os.environ["PATH"].split(os.pathsep)
912 911 os.environ["PATH"] = os.pathsep.join(path)
913 912
914 913 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
915 914 # can run .../tests/run-tests.py test-foo where test-foo
916 915 # adds an extension to HGRC
917 916 pypath = [PYTHONDIR, TESTDIR]
918 917 # We have to augment PYTHONPATH, rather than simply replacing
919 918 # it, in case external libraries are only available via current
920 919 # PYTHONPATH. (In particular, the Subversion bindings on OS X
921 920 # are in /opt/subversion.)
922 921 oldpypath = os.environ.get(IMPL_PATH)
923 922 if oldpypath:
924 923 pypath.append(oldpypath)
925 924 os.environ[IMPL_PATH] = os.pathsep.join(pypath)
926 925
927 926 COVERAGE_FILE = os.path.join(TESTDIR, ".coverage")
928 927
929 928 if len(args) == 0:
930 929 args = os.listdir(".")
931 930 args.sort()
932 931
933 932 tests = []
934 933 for test in args:
935 934 if (test.startswith("test-") and '~' not in test and
936 935 ('.' not in test or test.endswith('.py') or
937 936 test.endswith('.bat'))):
938 937 tests.append(test)
939 938 if not tests:
940 939 print "# Ran 0 tests, 0 skipped, 0 failed."
941 940 return
942 941
943 942 vlog("# Using TESTDIR", TESTDIR)
944 943 vlog("# Using HGTMP", HGTMP)
945 944 vlog("# Using PATH", os.environ["PATH"])
946 945 vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])
947 946
948 947 try:
949 948 if len(tests) > 1 and options.jobs > 1:
950 949 runchildren(options, tests)
951 950 else:
952 951 runtests(options, tests)
953 952 finally:
954 953 time.sleep(1)
955 954 cleanup(options)
956 955
957 956 main()
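
Aside (editor's note, not part of the changeset shown above): run() and runone() guard each test with a SIGALRM-based timeout -- alarmed() raises Timeout, and the caller turns that into process cleanup plus the "### Abort: timeout" marker appended to the output. Below is a minimal, hedged sketch of the same pattern; run_with_timeout and its cleanup step are illustrative only, and it is Unix-only just like the original (signal.alarm is unavailable on Windows).

    # Illustrative sketch of the alarm-based timeout pattern used by run()/runone().
    import signal
    import subprocess

    class Timeout(Exception):
        pass

    def alarmed(signum, frame):
        raise Timeout

    def run_with_timeout(cmd, seconds):
        signal.signal(signal.SIGALRM, alarmed)
        signal.alarm(seconds)              # arm the per-test timer
        proc = subprocess.Popen(cmd, shell=True)
        try:
            ret = proc.wait()
        except Timeout:
            proc.terminate()               # the real runner also kills leftover daemons
            ret = proc.wait()
        finally:
            signal.alarm(0)                # always disarm the timer
        return ret
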
@@ -1,32 +1,31 b''
1 1 #!/usr/bin/python
2 import os
3 2 from mercurial.ui import ui
4 3 from mercurial.localrepo import localrepository
5 4 from mercurial.commands import add, commit, status
6 5
7 6 u = ui()
8 7
9 8 print '% creating repo'
10 9 repo = localrepository(u, '.', create=True)
11 10
12 11 f = open('test.py', 'w')
13 12 try:
14 13 f.write('foo\n')
15 14 finally:
16 15 f.close()
17 16
18 17 print '% add and commit'
19 18 add(u, repo, 'test.py')
20 19 commit(u, repo, message='*')
21 20 status(u, repo, clean=True)
22 21
23 22
24 23 print '% change'
25 24 f = open('test.py', 'w')
26 25 try:
27 26 f.write('bar\n')
28 27 finally:
29 28 f.close()
30 29
31 30 # this would return clean instead of changed before the fix
32 31 status(u, repo, clean=True, modified=True)
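
Aside (editor's note, not part of either file above): splitnewlines() in the runner keeps the trailing newline on every element, unlike str.splitlines(); that matters because the captured test output and the reference .out file are compared line by line, newlines included, before being fed to difflib.unified_diff(). A small self-contained illustration (the sample string is made up):

    # Illustrative only; mirrors the behaviour of splitnewlines() above.
    def splitnewlines(text):
        i = 0
        lines = []
        while True:
            n = text.find('\n', i)
            if n == -1:
                if text[i:]:
                    lines.append(text[i:])
                return lines
            lines.append(text[i:n + 1])
            i = n + 1

    sample = "files patched: 1\npartial last line"
    assert splitnewlines(sample) == ["files patched: 1\n", "partial last line"]
    assert sample.splitlines() == ["files patched: 1", "partial last line"]
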