@@ -1,1110 +1,1110 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $ |
|
3 | 3 | # Author: Engelbert Gruber <grubert@users.sourceforge.net> |
|
4 | 4 | # Copyright: This module is put into the public domain. |
|
5 | 5 | |
|
6 | 6 | """ |
|
7 | 7 | Simple man page writer for reStructuredText. |
|
8 | 8 | |
|
9 | 9 | Man pages (short for "manual pages") contain system documentation on unix-like |
|
10 | 10 | systems. The pages are grouped in numbered sections: |
|
11 | 11 | |
|
12 | 12 | 1 executable programs and shell commands |
|
13 | 13 | 2 system calls |
|
14 | 14 | 3 library functions |
|
15 | 15 | 4 special files |
|
16 | 16 | 5 file formats |
|
17 | 17 | 6 games |
|
18 | 18 | 7 miscellaneous |
|
19 | 19 | 8 system administration |
|
20 | 20 | |
|
21 | 21 | Man pages are written in *troff*, a text file formatting system. |
|
22 | 22 | |
|
23 | 23 | See http://www.tldp.org/HOWTO/Man-Page for a start. |
|
24 | 24 | |
|
25 | 25 | Man pages have no subsections, only parts. |
|
26 | 26 | The standard parts are |
|
27 | 27 | |
|
28 | 28 | NAME , |
|
29 | 29 | SYNOPSIS , |
|
30 | 30 | DESCRIPTION , |
|
31 | 31 | OPTIONS , |
|
32 | 32 | FILES , |
|
33 | 33 | SEE ALSO , |
|
34 | 34 | BUGS , |
|
35 | 35 | |
|
36 | 36 | and |
|
37 | 37 | |
|
38 | 38 | AUTHOR . |
|
39 | 39 | |
|
40 | 40 | A unix-like system keeps an index of the DESCRIPTIONs, which is accessible |
|
41 | 41 | via the commands whatis and apropos. |
|
42 | 42 | |
|
43 | 43 | """ |
|
44 | 44 | |
|
45 | 45 | __docformat__ = 'reStructuredText' |
|
46 | 46 | |
|
47 | 47 | import re |
|
48 | 48 | |
|
49 | 49 | from docutils import nodes, writers, languages |
|
50 | 50 | try: |
|
51 | 51 | import roman |
|
52 | 52 | except ImportError: |
|
53 | 53 | from docutils.utils import roman |
|
54 | 54 | import inspect |
|
55 | 55 | |
|
56 | 56 | FIELD_LIST_INDENT = 7 |
|
57 | 57 | DEFINITION_LIST_INDENT = 7 |
|
58 | 58 | OPTION_LIST_INDENT = 7 |
|
59 | 59 | BLOCKQOUTE_INDENT = 3.5 |
|
60 | 60 | |
|
61 | 61 | # Define two macros so man/roff can calculate the |
|
62 | 62 | # indent/unindent margins by itself |
|
63 | 63 | MACRO_DEF = (r""". |
|
64 | 64 | .nr rst2man-indent-level 0 |
|
65 | 65 | . |
|
66 | 66 | .de1 rstReportMargin |
|
67 | 67 | \\$1 \\n[an-margin] |
|
68 | 68 | level \\n[rst2man-indent-level] |
|
69 | 69 | level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
70 | 70 | - |
|
71 | 71 | \\n[rst2man-indent0] |
|
72 | 72 | \\n[rst2man-indent1] |
|
73 | 73 | \\n[rst2man-indent2] |
|
74 | 74 | .. |
|
75 | 75 | .de1 INDENT |
|
76 | 76 | .\" .rstReportMargin pre: |
|
77 | 77 | . RS \\$1 |
|
78 | 78 | . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] |
|
79 | 79 | . nr rst2man-indent-level +1 |
|
80 | 80 | .\" .rstReportMargin post: |
|
81 | 81 | .. |
|
82 | 82 | .de UNINDENT |
|
83 | 83 | . RE |
|
84 | 84 | .\" indent \\n[an-margin] |
|
85 | 85 | .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
86 | 86 | .nr rst2man-indent-level -1 |
|
87 | 87 | .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] |
|
88 | 88 | .in \\n[rst2man-indent\\n[rst2man-indent-level]]u |
|
89 | 89 | .. |
|
90 | 90 | """) |
|
91 | 91 | |
|
92 | 92 | class Writer(writers.Writer): |
|
93 | 93 | |
|
94 | 94 | supported = ('manpage',) # one-element tuple, not a plain string |
|
95 | 95 | """Formats this writer supports.""" |
|
96 | 96 | |
|
97 | 97 | output = None |
|
98 | 98 | """Final translated form of `document`.""" |
|
99 | 99 | |
|
100 | 100 | def __init__(self): |
|
101 | 101 | writers.Writer.__init__(self) |
|
102 | 102 | self.translator_class = Translator |
|
103 | 103 | |
|
104 | 104 | def translate(self): |
|
105 | 105 | visitor = self.translator_class(self.document) |
|
106 | 106 | self.document.walkabout(visitor) |
|
107 | 107 | self.output = visitor.astext() |
|
108 | 108 | |
|
109 | 109 | |
|
110 | 110 | class Table(object): |
|
111 | 111 | def __init__(self): |
|
112 | 112 | self._rows = [] |
|
113 | 113 | self._options = ['center'] |
|
114 | 114 | self._tab_char = '\t' |
|
115 | 115 | self._coldefs = [] |
|
116 | 116 | def new_row(self): |
|
117 | 117 | self._rows.append([]) |
|
118 | 118 | def append_separator(self, separator): |
|
119 | 119 | """Append the separator for table head.""" |
|
120 | 120 | self._rows.append([separator]) |
|
121 | 121 | def append_cell(self, cell_lines): |
|
122 | 122 | """cell_lines is an array of lines""" |
|
123 | 123 | start = 0 |
|
124 | 124 | if len(cell_lines) > 0 and cell_lines[0] == '.sp\n': |
|
125 | 125 | start = 1 |
|
126 | 126 | self._rows[-1].append(cell_lines[start:]) |
|
127 | 127 | if len(self._coldefs) < len(self._rows[-1]): |
|
128 | 128 | self._coldefs.append('l') |
|
129 | 129 | def _minimize_cell(self, cell_lines): |
|
130 | 130 | """Remove leading and trailing blank and ``.sp`` lines""" |
|
131 | 131 | while (cell_lines and cell_lines[0] in ('\n', '.sp\n')): |
|
132 | 132 | del cell_lines[0] |
|
133 | 133 | while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')): |
|
134 | 134 | del cell_lines[-1] |
|
135 | 135 | def as_list(self): |
|
136 | 136 | text = ['.TS\n'] |
|
137 | 137 | text.append(' '.join(self._options) + ';\n') |
|
138 | 138 | text.append('|%s|.\n' % ('|'.join(self._coldefs))) |
|
139 | 139 | for row in self._rows: |
|
140 | 140 | # row = array of cells. cell = array of lines. |
|
141 | 141 | text.append('_\n') # line above |
|
142 | 142 | text.append('T{\n') |
|
143 | 143 | for i in range(len(row)): |
|
144 | 144 | cell = row[i] |
|
145 | 145 | self._minimize_cell(cell) |
|
146 | 146 | text.extend(cell) |
|
147 | 147 | if not text[-1].endswith('\n'): |
|
148 | 148 | text[-1] += '\n' |
|
149 | 149 | if i < len(row)-1: |
|
150 | 150 | text.append('T}'+self._tab_char+'T{\n') |
|
151 | 151 | else: |
|
152 | 152 | text.append('T}\n') |
|
153 | 153 | text.append('_\n') |
|
154 | 154 | text.append('.TE\n') |
|
155 | 155 | return text |
|
156 | 156 | |
|
157 | 157 | class Translator(nodes.NodeVisitor): |
|
158 | 158 | """""" |
|
159 | 159 | |
|
160 | 160 | words_and_spaces = re.compile(r'\S+| +|\n') |
|
161 | 161 | document_start = """Man page generated from reStructuredText.""" |
|
162 | 162 | |
|
163 | 163 | def __init__(self, document): |
|
164 | 164 | nodes.NodeVisitor.__init__(self, document) |
|
165 | 165 | self.settings = settings = document.settings |
|
166 | 166 | lcode = settings.language_code |
|
167 | 167 | arglen = len(inspect.getargspec(languages.get_language)[0]) |
|
168 | 168 | if arglen == 2: |
|
169 | 169 | self.language = languages.get_language(lcode, |
|
170 | 170 | self.document.reporter) |
|
171 | 171 | else: |
|
172 | 172 | self.language = languages.get_language(lcode) |
|
173 | 173 | self.head = [] |
|
174 | 174 | self.body = [] |
|
175 | 175 | self.foot = [] |
|
176 | 176 | self.section_level = 0 |
|
177 | 177 | self.context = [] |
|
178 | 178 | self.topic_class = '' |
|
179 | 179 | self.colspecs = [] |
|
180 | 180 | self.compact_p = 1 |
|
181 | 181 | self.compact_simple = None |
|
182 | 182 | # the list style: "*" bullet or "#" numbered |
|
183 | 183 | self._list_char = [] |
|
184 | 184 | # writing the header .TH and .SH NAME is postponed until after |
|
185 | 185 | # docinfo. |
|
186 | 186 | self._docinfo = { |
|
187 | 187 | "title" : "", "title_upper": "", |
|
188 | 188 | "subtitle" : "", |
|
189 | 189 | "manual_section" : "", "manual_group" : "", |
|
190 | 190 | "author" : [], |
|
191 | 191 | "date" : "", |
|
192 | 192 | "copyright" : "", |
|
193 | 193 | "version" : "", |
|
194 | 194 | } |
|
195 | 195 | self._docinfo_keys = [] # a list to keep the sequence as in source. |
|
196 | 196 | self._docinfo_names = {} # map normalized names back to the original field-name text. |
|
197 | 197 | self._in_docinfo = None |
|
198 | 198 | self._active_table = None |
|
199 | 199 | self._in_literal = False |
|
200 | 200 | self.header_written = 0 |
|
201 | 201 | self._line_block = 0 |
|
202 | 202 | self.authors = [] |
|
203 | 203 | self.section_level = 0 |
|
204 | 204 | self._indent = [0] |
|
205 | 205 | # central definition of simple processing rules |
|
206 | 206 | # what to output on: visit, depart |
|
207 | 207 | # Do not use paragraph requests ``.PP`` because these set indentation. |
|
208 | 208 | # use ``.sp``. Remove superfluous ``.sp`` in ``astext``. |
|
209 | 209 | # |
|
210 | 210 | # Fonts are put on a stack, the top one is used. |
|
211 | 211 | # ``.ft P`` or ``\\fP`` pop from stack. |
|
212 | 212 | # ``B`` bold, ``I`` italic, ``R`` roman should be available. |
|
213 | 213 | # Hopefully ``C`` courier too. |
|
214 | 214 | self.defs = { |
|
215 | 215 | 'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'), |
|
216 | 216 | 'definition_list_item' : ('.TP', ''), |
|
217 | 217 | 'field_name' : ('.TP\n.B ', '\n'), |
|
218 | 218 | 'literal' : ('\\fB', '\\fP'), |
|
219 | 219 | 'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'), |
|
220 | 220 | |
|
221 | 221 | 'option_list_item' : ('.TP\n', ''), |
|
222 | 222 | |
|
223 | 223 | 'reference' : (r'\%', r'\:'), |
|
224 | 224 | 'emphasis': ('\\fI', '\\fP'), |
|
225 | 225 | 'strong' : ('\\fB', '\\fP'), |
|
226 | 226 | 'term' : ('\n.B ', '\n'), |
|
227 | 227 | 'title_reference' : ('\\fI', '\\fP'), |
|
228 | 228 | |
|
229 | 229 | 'topic-title' : ('.SS ',), |
|
230 | 230 | 'sidebar-title' : ('.SS ',), |
|
231 | 231 | |
|
232 | 232 | 'problematic' : ('\n.nf\n', '\n.fi\n'), |
|
233 | 233 | } |
|
234 | 234 | # NOTE do not output the newline before a dot-command, but ensure |
|
235 | 235 | # it is there. |
|
236 | 236 | |
|
237 | 237 | def comment_begin(self, text): |
|
238 | 238 | """Return commented version of the passed text WITHOUT end of |
|
239 | 239 | line/comment.""" |
|
240 | 240 | prefix = '.\\" ' |
|
241 | 241 | out_text = ''.join( |
|
242 | 242 | [(prefix + in_line + '\n') |
|
243 | 243 | for in_line in text.split('\n')]) |
|
244 | 244 | return out_text |
|
245 | 245 | |
|
246 | 246 | def comment(self, text): |
|
247 | 247 | """Return commented version of the passed text.""" |
|
248 | 248 | return self.comment_begin(text)+'.\n' |
|
249 | 249 | |
|
250 | 250 | def ensure_eol(self): |
|
251 | 251 | """Ensure the last line in body is terminated by new line.""" |
|
252 | 252 | if self.body[-1][-1] != '\n': |
|
253 | 253 | self.body.append('\n') |
|
254 | 254 | |
|
255 | 255 | def astext(self): |
|
256 | 256 | """Return the final formatted document as a string.""" |
|
257 | 257 | if not self.header_written: |
|
258 | 258 | # ensure we get a ".TH" as viewers require it. |
|
259 | 259 | self.head.append(self.header()) |
|
260 | 260 | # filter body |
|
261 | 261 | for i in xrange(len(self.body)-1, 0, -1): |
|
262 | 262 | # remove superfluous vertical gaps. |
|
263 | 263 | if self.body[i] == '.sp\n': |
|
264 | 264 | if self.body[i - 1][:4] in ('.BI ','.IP '): |
|
265 | 265 | self.body[i] = '.\n' |
|
266 | 266 | elif (self.body[i - 1][:3] == '.B ' and |
|
267 | 267 | self.body[i - 2][:4] == '.TP\n'): |
|
268 | 268 | self.body[i] = '.\n' |
|
269 | 269 | elif (self.body[i - 1] == '\n' and |
|
270 | 270 | self.body[i - 2][0] != '.' and |
|
271 | 271 | (self.body[i - 3][:7] == '.TP\n.B ' |
|
272 | 272 | or self.body[i - 3][:4] == '\n.B ') |
|
273 | 273 | ): |
|
274 | 274 | self.body[i] = '.\n' |
|
275 | 275 | return ''.join(self.head + self.body + self.foot) |
|
276 | 276 | |
|
277 | 277 | def deunicode(self, text): |
|
278 | 278 | text = text.replace(u'\xa0', '\\ ') |
|
279 | 279 | text = text.replace(u'\u2020', '\\(dg') |
|
280 | 280 | return text |
|
281 | 281 | |
|
282 | 282 | def visit_Text(self, node): |
|
283 | 283 | text = node.astext() |
|
284 | 284 | text = text.replace('\\','\\e') |
|
285 | 285 | replace_pairs = [ |
|
286 | 286 | (u'-', ur'\-'), |
|
287 | 287 | (u'\'', ur'\(aq'), |
|
288 | 288 | (u'Β΄', ur'\''), |
|
289 | 289 | (u'`', ur'\(ga'), |
|
290 | 290 | ] |
|
291 | 291 | for (in_char, out_markup) in replace_pairs: |
|
292 | 292 | text = text.replace(in_char, out_markup) |
|
293 | 293 | # unicode |
|
294 | 294 | text = self.deunicode(text) |
|
295 | 295 | if self._in_literal: |
|
296 | 296 | # prevent interpretation of "." at line start |
|
297 | 297 | if text[0] == '.': |
|
298 | 298 | text = '\\&' + text |
|
299 | 299 | text = text.replace('\n.', '\n\\&.') |
|
300 | 300 | self.body.append(text) |
|
301 | 301 | |
|
302 | 302 | def depart_Text(self, node): |
|
303 | 303 | pass |
|
304 | 304 | |
|
305 | 305 | def list_start(self, node): |
|
306 | 306 | class enum_char(object): |
|
307 | 307 | enum_style = { |
|
308 | 308 | 'bullet' : '\\(bu', |
|
309 | 309 | 'emdash' : '\\(em', |
|
310 | 310 | } |
|
311 | 311 | |
|
312 | 312 | def __init__(self, style): |
|
313 | 313 | self._style = style |
|
314 | 314 | if 'start' in node: |
|
315 | 315 | self._cnt = node['start'] - 1 |
|
316 | 316 | else: |
|
317 | 317 | self._cnt = 0 |
|
318 | 318 | self._indent = 2 |
|
319 | 319 | if style == 'arabic': |
|
320 | 320 | # indentation depends on the number of children |
|
321 | 321 | # and start value. |
|
322 | 322 | self._indent = len(str(len(node.children))) |
|
323 | 323 | self._indent += len(str(self._cnt)) + 1 |
|
324 | 324 | elif style == 'loweralpha': |
|
325 | 325 | self._cnt += ord('a') - 1 |
|
326 | 326 | self._indent = 3 |
|
327 | 327 | elif style == 'upperalpha': |
|
328 | 328 | self._cnt += ord('A') - 1 |
|
329 | 329 | self._indent = 3 |
|
330 | 330 | elif style.endswith('roman'): |
|
331 | 331 | self._indent = 5 |
|
332 | 332 | |
|
333 | 333 | def next(self): |
|
334 | 334 | if self._style == 'bullet': |
|
335 | 335 | return self.enum_style[self._style] |
|
336 | 336 | elif self._style == 'emdash': |
|
337 | 337 | return self.enum_style[self._style] |
|
338 | 338 | self._cnt += 1 |
|
339 | 339 | # TODO add prefix postfix |
|
340 | 340 | if self._style == 'arabic': |
|
341 | 341 | return "%d." % self._cnt |
|
342 | 342 | elif self._style in ('loweralpha', 'upperalpha'): |
|
343 | 343 | return "%c." % self._cnt |
|
344 | 344 | elif self._style.endswith('roman'): |
|
345 | 345 | res = roman.toRoman(self._cnt) + '.' |
|
346 | 346 | if self._style.startswith('upper'): |
|
347 | 347 | return res.upper() |
|
348 | 348 | return res.lower() |
|
349 | 349 | else: |
|
350 | 350 | return "%d." % self._cnt |
|
351 | 351 | def get_width(self): |
|
352 | 352 | return self._indent |
|
353 | 353 | def __repr__(self): |
|
354 | 354 | return 'enum_style-%s' % list(self._style) |
|
355 | 355 | |
|
356 | 356 | if 'enumtype' in node: |
|
357 | 357 | self._list_char.append(enum_char(node['enumtype'])) |
|
358 | 358 | else: |
|
359 | 359 | self._list_char.append(enum_char('bullet')) |
|
360 | 360 | if len(self._list_char) > 1: |
|
361 | 361 | # indent nested lists |
|
362 | 362 | self.indent(self._list_char[-2].get_width()) |
|
363 | 363 | else: |
|
364 | 364 | self.indent(self._list_char[-1].get_width()) |
|
365 | 365 | |
|
366 | 366 | def list_end(self): |
|
367 | 367 | self.dedent() |
|
368 | 368 | self._list_char.pop() |
|
369 | 369 | |
|
370 | 370 | def header(self): |
|
371 | 371 | tmpl = (".TH %(title_upper)s %(manual_section)s" |
|
372 | 372 | " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n" |
|
373 | 373 | ".SH NAME\n" |
|
374 | 374 | "%(title)s \- %(subtitle)s\n") |
|
375 | 375 | return tmpl % self._docinfo |
|
376 | 376 | |
|
377 | 377 | def append_header(self): |
|
378 | 378 | """append header with .TH and .SH NAME""" |
|
379 | 379 | # NOTE before everything |
|
380 | 380 | # .TH title_upper section date source manual |
|
381 | 381 | if self.header_written: |
|
382 | 382 | return |
|
383 | 383 | self.body.append(self.header()) |
|
384 | 384 | self.body.append(MACRO_DEF) |
|
385 | 385 | self.header_written = 1 |
|
386 | 386 | |
|
387 | 387 | def visit_address(self, node): |
|
388 | 388 | self.visit_docinfo_item(node, 'address') |
|
389 | 389 | |
|
390 | 390 | def depart_address(self, node): |
|
391 | 391 | pass |
|
392 | 392 | |
|
393 | 393 | def visit_admonition(self, node, name=None): |
|
394 | 394 | if name: |
|
395 | 395 | self.body.append('.IP %s\n' % |
|
396 | 396 | self.language.labels.get(name, name)) |
|
397 | 397 | |
|
398 | 398 | def depart_admonition(self, node): |
|
399 | 399 | self.body.append('.RE\n') |
|
400 | 400 | |
|
401 | 401 | def visit_attention(self, node): |
|
402 | 402 | self.visit_admonition(node, 'attention') |
|
403 | 403 | |
|
404 | 404 | depart_attention = depart_admonition |
|
405 | 405 | |
|
406 | 406 | def visit_docinfo_item(self, node, name): |
|
407 | 407 | if name == 'author': |
|
408 | 408 | self._docinfo[name].append(node.astext()) |
|
409 | 409 | else: |
|
410 | 410 | self._docinfo[name] = node.astext() |
|
411 | 411 | self._docinfo_keys.append(name) |
|
412 | 412 | raise nodes.SkipNode |
|
413 | 413 | |
|
414 | 414 | def depart_docinfo_item(self, node): |
|
415 | 415 | pass |
|
416 | 416 | |
|
417 | 417 | def visit_author(self, node): |
|
418 | 418 | self.visit_docinfo_item(node, 'author') |
|
419 | 419 | |
|
420 | 420 | depart_author = depart_docinfo_item |
|
421 | 421 | |
|
422 | 422 | def visit_authors(self, node): |
|
423 | 423 | # visit_author is called anyway. |
|
424 | 424 | pass |
|
425 | 425 | |
|
426 | 426 | def depart_authors(self, node): |
|
427 | 427 | pass |
|
428 | 428 | |
|
429 | 429 | def visit_block_quote(self, node): |
|
430 | 430 | # BUG/HACK: indent always uses the _last_ indentation, |
|
431 | 431 | # thus we need two of them. |
|
432 | 432 | self.indent(BLOCKQOUTE_INDENT) |
|
433 | 433 | self.indent(0) |
|
434 | 434 | |
|
435 | 435 | def depart_block_quote(self, node): |
|
436 | 436 | self.dedent() |
|
437 | 437 | self.dedent() |
|
438 | 438 | |
|
439 | 439 | def visit_bullet_list(self, node): |
|
440 | 440 | self.list_start(node) |
|
441 | 441 | |
|
442 | 442 | def depart_bullet_list(self, node): |
|
443 | 443 | self.list_end() |
|
444 | 444 | |
|
445 | 445 | def visit_caption(self, node): |
|
446 | 446 | pass |
|
447 | 447 | |
|
448 | 448 | def depart_caption(self, node): |
|
449 | 449 | pass |
|
450 | 450 | |
|
451 | 451 | def visit_caution(self, node): |
|
452 | 452 | self.visit_admonition(node, 'caution') |
|
453 | 453 | |
|
454 | 454 | depart_caution = depart_admonition |
|
455 | 455 | |
|
456 | 456 | def visit_citation(self, node): |
|
457 | 457 | num, text = node.astext().split(None, 1) |
|
458 | 458 | num = num.strip() |
|
459 | 459 | self.body.append('.IP [%s] 5\n' % num) |
|
460 | 460 | |
|
461 | 461 | def depart_citation(self, node): |
|
462 | 462 | pass |
|
463 | 463 | |
|
464 | 464 | def visit_citation_reference(self, node): |
|
465 | 465 | self.body.append('['+node.astext()+']') |
|
466 | 466 | raise nodes.SkipNode |
|
467 | 467 | |
|
468 | 468 | def visit_classifier(self, node): |
|
469 | 469 | pass |
|
470 | 470 | |
|
471 | 471 | def depart_classifier(self, node): |
|
472 | 472 | pass |
|
473 | 473 | |
|
474 | 474 | def visit_colspec(self, node): |
|
475 | 475 | self.colspecs.append(node) |
|
476 | 476 | |
|
477 | 477 | def depart_colspec(self, node): |
|
478 | 478 | pass |
|
479 | 479 | |
|
480 | 480 | def write_colspecs(self): |
|
481 | 481 | self.body.append("%s.\n" % ('L '*len(self.colspecs))) |
|
482 | 482 | |
|
483 | 483 | def visit_comment(self, node, |
|
484 | 484 | sub=re.compile('-(?=-)').sub): |
|
485 | 485 | self.body.append(self.comment(node.astext())) |
|
486 | 486 | raise nodes.SkipNode |
|
487 | 487 | |
|
488 | 488 | def visit_contact(self, node): |
|
489 | 489 | self.visit_docinfo_item(node, 'contact') |
|
490 | 490 | |
|
491 | 491 | depart_contact = depart_docinfo_item |
|
492 | 492 | |
|
493 | 493 | def visit_container(self, node): |
|
494 | 494 | pass |
|
495 | 495 | |
|
496 | 496 | def depart_container(self, node): |
|
497 | 497 | pass |
|
498 | 498 | |
|
499 | 499 | def visit_compound(self, node): |
|
500 | 500 | pass |
|
501 | 501 | |
|
502 | 502 | def depart_compound(self, node): |
|
503 | 503 | pass |
|
504 | 504 | |
|
505 | 505 | def visit_copyright(self, node): |
|
506 | 506 | self.visit_docinfo_item(node, 'copyright') |
|
507 | 507 | |
|
508 | 508 | def visit_danger(self, node): |
|
509 | 509 | self.visit_admonition(node, 'danger') |
|
510 | 510 | |
|
511 | 511 | depart_danger = depart_admonition |
|
512 | 512 | |
|
513 | 513 | def visit_date(self, node): |
|
514 | 514 | self.visit_docinfo_item(node, 'date') |
|
515 | 515 | |
|
516 | 516 | def visit_decoration(self, node): |
|
517 | 517 | pass |
|
518 | 518 | |
|
519 | 519 | def depart_decoration(self, node): |
|
520 | 520 | pass |
|
521 | 521 | |
|
522 | 522 | def visit_definition(self, node): |
|
523 | 523 | pass |
|
524 | 524 | |
|
525 | 525 | def depart_definition(self, node): |
|
526 | 526 | pass |
|
527 | 527 | |
|
528 | 528 | def visit_definition_list(self, node): |
|
529 | 529 | self.indent(DEFINITION_LIST_INDENT) |
|
530 | 530 | |
|
531 | 531 | def depart_definition_list(self, node): |
|
532 | 532 | self.dedent() |
|
533 | 533 | |
|
534 | 534 | def visit_definition_list_item(self, node): |
|
535 | 535 | self.body.append(self.defs['definition_list_item'][0]) |
|
536 | 536 | |
|
537 | 537 | def depart_definition_list_item(self, node): |
|
538 | 538 | self.body.append(self.defs['definition_list_item'][1]) |
|
539 | 539 | |
|
540 | 540 | def visit_description(self, node): |
|
541 | 541 | pass |
|
542 | 542 | |
|
543 | 543 | def depart_description(self, node): |
|
544 | 544 | pass |
|
545 | 545 | |
|
546 | 546 | def visit_docinfo(self, node): |
|
547 | 547 | self._in_docinfo = 1 |
|
548 | 548 | |
|
549 | 549 | def depart_docinfo(self, node): |
|
550 | 550 | self._in_docinfo = None |
|
551 | 551 | # NOTE nothing should be written before this |
|
552 | 552 | self.append_header() |
|
553 | 553 | |
|
554 | 554 | def visit_doctest_block(self, node): |
|
555 | 555 | self.body.append(self.defs['literal_block'][0]) |
|
556 | 556 | self._in_literal = True |
|
557 | 557 | |
|
558 | 558 | def depart_doctest_block(self, node): |
|
559 | 559 | self._in_literal = False |
|
560 | 560 | self.body.append(self.defs['literal_block'][1]) |
|
561 | 561 | |
|
562 | 562 | def visit_document(self, node): |
|
563 | 563 | # no blank line between comment and header. |
|
564 | 564 | self.body.append(self.comment(self.document_start).rstrip()+'\n') |
|
565 | 565 | # writing the header is postponed |
|
566 | 566 | self.header_written = 0 |
|
567 | 567 | |
|
568 | 568 | def depart_document(self, node): |
|
569 | 569 | if self._docinfo['author']: |
|
570 | 570 | self.body.append('.SH AUTHOR\n%s\n' |
|
571 | 571 | % ', '.join(self._docinfo['author'])) |
|
572 | 572 | skip = ('author', 'copyright', 'date', |
|
573 | 573 | 'manual_group', 'manual_section', |
|
574 | 574 | 'subtitle', |
|
575 | 575 | 'title', 'title_upper', 'version') |
|
576 | 576 | for name in self._docinfo_keys: |
|
577 | 577 | if name == 'address': |
|
578 | 578 | self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % ( |
|
579 | 579 | self.language.labels.get(name, name), |
|
580 | 580 | self.defs['indent'][0] % 0, |
|
581 | 581 | self.defs['indent'][0] % BLOCKQOUTE_INDENT, |
|
582 | 582 | self._docinfo[name], |
|
583 | 583 | self.defs['indent'][1], |
|
584 | 584 | self.defs['indent'][1])) |
|
585 | | elif not name in skip: |
|
| 585 | elif name not in skip: |
|
586 | 586 | if name in self._docinfo_names: |
|
587 | 587 | label = self._docinfo_names[name] |
|
588 | 588 | else: |
|
589 | 589 | label = self.language.labels.get(name, name) |
|
590 | 590 | self.body.append("\n%s: %s\n" % (label, self._docinfo[name])) |
|
591 | 591 | if self._docinfo['copyright']: |
|
592 | 592 | self.body.append('.SH COPYRIGHT\n%s\n' |
|
593 | 593 | % self._docinfo['copyright']) |
|
594 | 594 | self.body.append(self.comment( |
|
595 | 595 | 'Generated by docutils manpage writer.\n')) |
|
596 | 596 | |
|
597 | 597 | def visit_emphasis(self, node): |
|
598 | 598 | self.body.append(self.defs['emphasis'][0]) |
|
599 | 599 | |
|
600 | 600 | def depart_emphasis(self, node): |
|
601 | 601 | self.body.append(self.defs['emphasis'][1]) |
|
602 | 602 | |
|
603 | 603 | def visit_entry(self, node): |
|
604 | 604 | # a cell in a table row |
|
605 | 605 | if 'morerows' in node: |
|
606 | 606 | self.document.reporter.warning('"table row spanning" not supported', |
|
607 | 607 | base_node=node) |
|
608 | 608 | if 'morecols' in node: |
|
609 | 609 | self.document.reporter.warning( |
|
610 | 610 | '"table cell spanning" not supported', base_node=node) |
|
611 | 611 | self.context.append(len(self.body)) |
|
612 | 612 | |
|
613 | 613 | def depart_entry(self, node): |
|
614 | 614 | start = self.context.pop() |
|
615 | 615 | self._active_table.append_cell(self.body[start:]) |
|
616 | 616 | del self.body[start:] |
|
617 | 617 | |
|
618 | 618 | def visit_enumerated_list(self, node): |
|
619 | 619 | self.list_start(node) |
|
620 | 620 | |
|
621 | 621 | def depart_enumerated_list(self, node): |
|
622 | 622 | self.list_end() |
|
623 | 623 | |
|
624 | 624 | def visit_error(self, node): |
|
625 | 625 | self.visit_admonition(node, 'error') |
|
626 | 626 | |
|
627 | 627 | depart_error = depart_admonition |
|
628 | 628 | |
|
629 | 629 | def visit_field(self, node): |
|
630 | 630 | pass |
|
631 | 631 | |
|
632 | 632 | def depart_field(self, node): |
|
633 | 633 | pass |
|
634 | 634 | |
|
635 | 635 | def visit_field_body(self, node): |
|
636 | 636 | if self._in_docinfo: |
|
637 | 637 | name_normalized = self._field_name.lower().replace(" ","_") |
|
638 | 638 | self._docinfo_names[name_normalized] = self._field_name |
|
639 | 639 | self.visit_docinfo_item(node, name_normalized) |
|
640 | 640 | raise nodes.SkipNode |
|
641 | 641 | |
|
642 | 642 | def depart_field_body(self, node): |
|
643 | 643 | pass |
|
644 | 644 | |
|
645 | 645 | def visit_field_list(self, node): |
|
646 | 646 | self.indent(FIELD_LIST_INDENT) |
|
647 | 647 | |
|
648 | 648 | def depart_field_list(self, node): |
|
649 | 649 | self.dedent() |
|
650 | 650 | |
|
651 | 651 | def visit_field_name(self, node): |
|
652 | 652 | if self._in_docinfo: |
|
653 | 653 | self._field_name = node.astext() |
|
654 | 654 | raise nodes.SkipNode |
|
655 | 655 | else: |
|
656 | 656 | self.body.append(self.defs['field_name'][0]) |
|
657 | 657 | |
|
658 | 658 | def depart_field_name(self, node): |
|
659 | 659 | self.body.append(self.defs['field_name'][1]) |
|
660 | 660 | |
|
661 | 661 | def visit_figure(self, node): |
|
662 | 662 | self.indent(2.5) |
|
663 | 663 | self.indent(0) |
|
664 | 664 | |
|
665 | 665 | def depart_figure(self, node): |
|
666 | 666 | self.dedent() |
|
667 | 667 | self.dedent() |
|
668 | 668 | |
|
669 | 669 | def visit_footer(self, node): |
|
670 | 670 | self.document.reporter.warning('"footer" not supported', |
|
671 | 671 | base_node=node) |
|
672 | 672 | |
|
673 | 673 | def depart_footer(self, node): |
|
674 | 674 | pass |
|
675 | 675 | |
|
676 | 676 | def visit_footnote(self, node): |
|
677 | 677 | num, text = node.astext().split(None, 1) |
|
678 | 678 | num = num.strip() |
|
679 | 679 | self.body.append('.IP [%s] 5\n' % self.deunicode(num)) |
|
680 | 680 | |
|
681 | 681 | def depart_footnote(self, node): |
|
682 | 682 | pass |
|
683 | 683 | |
|
684 | 684 | def footnote_backrefs(self, node): |
|
685 | 685 | self.document.reporter.warning('"footnote_backrefs" not supported', |
|
686 | 686 | base_node=node) |
|
687 | 687 | |
|
688 | 688 | def visit_footnote_reference(self, node): |
|
689 | 689 | self.body.append('['+self.deunicode(node.astext())+']') |
|
690 | 690 | raise nodes.SkipNode |
|
691 | 691 | |
|
692 | 692 | def depart_footnote_reference(self, node): |
|
693 | 693 | pass |
|
694 | 694 | |
|
695 | 695 | def visit_generated(self, node): |
|
696 | 696 | pass |
|
697 | 697 | |
|
698 | 698 | def depart_generated(self, node): |
|
699 | 699 | pass |
|
700 | 700 | |
|
701 | 701 | def visit_header(self, node): |
|
702 | 702 | raise NotImplementedError, node.astext() |
|
703 | 703 | |
|
704 | 704 | def depart_header(self, node): |
|
705 | 705 | pass |
|
706 | 706 | |
|
707 | 707 | def visit_hint(self, node): |
|
708 | 708 | self.visit_admonition(node, 'hint') |
|
709 | 709 | |
|
710 | 710 | depart_hint = depart_admonition |
|
711 | 711 | |
|
712 | 712 | def visit_subscript(self, node): |
|
713 | 713 | self.body.append('\\s-2\\d') |
|
714 | 714 | |
|
715 | 715 | def depart_subscript(self, node): |
|
716 | 716 | self.body.append('\\u\\s0') |
|
717 | 717 | |
|
718 | 718 | def visit_superscript(self, node): |
|
719 | 719 | self.body.append('\\s-2\\u') |
|
720 | 720 | |
|
721 | 721 | def depart_superscript(self, node): |
|
722 | 722 | self.body.append('\\d\\s0') |
|
723 | 723 | |
|
724 | 724 | def visit_attribution(self, node): |
|
725 | 725 | self.body.append('\\(em ') |
|
726 | 726 | |
|
727 | 727 | def depart_attribution(self, node): |
|
728 | 728 | self.body.append('\n') |
|
729 | 729 | |
|
730 | 730 | def visit_image(self, node): |
|
731 | 731 | self.document.reporter.warning('"image" not supported', |
|
732 | 732 | base_node=node) |
|
733 | 733 | text = [] |
|
734 | 734 | if 'alt' in node.attributes: |
|
735 | 735 | text.append(node.attributes['alt']) |
|
736 | 736 | if 'uri' in node.attributes: |
|
737 | 737 | text.append(node.attributes['uri']) |
|
738 | 738 | self.body.append('[image: %s]\n' % ('/'.join(text))) |
|
739 | 739 | raise nodes.SkipNode |
|
740 | 740 | |
|
741 | 741 | def visit_important(self, node): |
|
742 | 742 | self.visit_admonition(node, 'important') |
|
743 | 743 | |
|
744 | 744 | depart_important = depart_admonition |
|
745 | 745 | |
|
746 | 746 | def visit_label(self, node): |
|
747 | 747 | # footnote and citation |
|
748 | 748 | if (isinstance(node.parent, nodes.footnote) |
|
749 | 749 | or isinstance(node.parent, nodes.citation)): |
|
750 | 750 | raise nodes.SkipNode |
|
751 | 751 | self.document.reporter.warning('unsupported "label"', |
|
752 | 752 | base_node=node) |
|
753 | 753 | self.body.append('[') |
|
754 | 754 | |
|
755 | 755 | def depart_label(self, node): |
|
756 | 756 | self.body.append(']\n') |
|
757 | 757 | |
|
758 | 758 | def visit_legend(self, node): |
|
759 | 759 | pass |
|
760 | 760 | |
|
761 | 761 | def depart_legend(self, node): |
|
762 | 762 | pass |
|
763 | 763 | |
|
764 | 764 | # WHAT should we use .INDENT, .UNINDENT ? |
|
765 | 765 | def visit_line_block(self, node): |
|
766 | 766 | self._line_block += 1 |
|
767 | 767 | if self._line_block == 1: |
|
768 | 768 | self.body.append('.sp\n') |
|
769 | 769 | self.body.append('.nf\n') |
|
770 | 770 | else: |
|
771 | 771 | self.body.append('.in +2\n') |
|
772 | 772 | |
|
773 | 773 | def depart_line_block(self, node): |
|
774 | 774 | self._line_block -= 1 |
|
775 | 775 | if self._line_block == 0: |
|
776 | 776 | self.body.append('.fi\n') |
|
777 | 777 | self.body.append('.sp\n') |
|
778 | 778 | else: |
|
779 | 779 | self.body.append('.in -2\n') |
|
780 | 780 | |
|
781 | 781 | def visit_line(self, node): |
|
782 | 782 | pass |
|
783 | 783 | |
|
784 | 784 | def depart_line(self, node): |
|
785 | 785 | self.body.append('\n') |
|
786 | 786 | |
|
787 | 787 | def visit_list_item(self, node): |
|
788 | 788 | # man 7 man recommends using ".IP" instead of ".TP" |
|
789 | 789 | self.body.append('.IP %s %d\n' % ( |
|
790 | 790 | self._list_char[-1].next(), |
|
791 | 791 | self._list_char[-1].get_width(),)) |
|
792 | 792 | |
|
793 | 793 | def depart_list_item(self, node): |
|
794 | 794 | pass |
|
795 | 795 | |
|
796 | 796 | def visit_literal(self, node): |
|
797 | 797 | self.body.append(self.defs['literal'][0]) |
|
798 | 798 | |
|
799 | 799 | def depart_literal(self, node): |
|
800 | 800 | self.body.append(self.defs['literal'][1]) |
|
801 | 801 | |
|
802 | 802 | def visit_literal_block(self, node): |
|
803 | 803 | self.body.append(self.defs['literal_block'][0]) |
|
804 | 804 | self._in_literal = True |
|
805 | 805 | |
|
806 | 806 | def depart_literal_block(self, node): |
|
807 | 807 | self._in_literal = False |
|
808 | 808 | self.body.append(self.defs['literal_block'][1]) |
|
809 | 809 | |
|
810 | 810 | def visit_meta(self, node): |
|
811 | 811 | raise NotImplementedError, node.astext() |
|
812 | 812 | |
|
813 | 813 | def depart_meta(self, node): |
|
814 | 814 | pass |
|
815 | 815 | |
|
816 | 816 | def visit_note(self, node): |
|
817 | 817 | self.visit_admonition(node, 'note') |
|
818 | 818 | |
|
819 | 819 | depart_note = depart_admonition |
|
820 | 820 | |
|
821 | 821 | def indent(self, by=0.5): |
|
822 | 822 | # if we are inside a ".SH" section there already is a .RS |
|
823 | 823 | step = self._indent[-1] |
|
824 | 824 | self._indent.append(by) |
|
825 | 825 | self.body.append(self.defs['indent'][0] % step) |
|
826 | 826 | |
|
827 | 827 | def dedent(self): |
|
828 | 828 | self._indent.pop() |
|
829 | 829 | self.body.append(self.defs['indent'][1]) |
|
830 | 830 | |
|
831 | 831 | def visit_option_list(self, node): |
|
832 | 832 | self.indent(OPTION_LIST_INDENT) |
|
833 | 833 | |
|
834 | 834 | def depart_option_list(self, node): |
|
835 | 835 | self.dedent() |
|
836 | 836 | |
|
837 | 837 | def visit_option_list_item(self, node): |
|
838 | 838 | # one item of the list |
|
839 | 839 | self.body.append(self.defs['option_list_item'][0]) |
|
840 | 840 | |
|
841 | 841 | def depart_option_list_item(self, node): |
|
842 | 842 | self.body.append(self.defs['option_list_item'][1]) |
|
843 | 843 | |
|
844 | 844 | def visit_option_group(self, node): |
|
845 | 845 | # as one option can have several forms, it is a group |
|
846 | 846 | # options without a parameter are bold only: .B, -v |
|
847 | 847 | # options with a parameter are bold italic: .BI, -f file |
|
848 | 848 | # |
|
849 | 849 | # we do not know if .B or .BI |
|
850 | 850 | self.context.append('.B') # blind guess |
|
851 | 851 | self.context.append(len(self.body)) # to be able to insert later |
|
852 | 852 | self.context.append(0) # option counter |
|
853 | 853 | |
|
854 | 854 | def depart_option_group(self, node): |
|
855 | 855 | self.context.pop() # the counter |
|
856 | 856 | start_position = self.context.pop() |
|
857 | 857 | text = self.body[start_position:] |
|
858 | 858 | del self.body[start_position:] |
|
859 | 859 | self.body.append('%s%s\n' % (self.context.pop(), ''.join(text))) |
|
860 | 860 | |
|
861 | 861 | def visit_option(self, node): |
|
862 | 862 | # each form of the option will be presented separately |
|
863 | 863 | if self.context[-1] > 0: |
|
864 | 864 | self.body.append(', ') |
|
865 | 865 | if self.context[-3] == '.BI': |
|
866 | 866 | self.body.append('\\') |
|
867 | 867 | self.body.append(' ') |
|
868 | 868 | |
|
869 | 869 | def depart_option(self, node): |
|
870 | 870 | self.context[-1] += 1 |
|
871 | 871 | |
|
872 | 872 | def visit_option_string(self, node): |
|
873 | 873 | # do not know if .B or .BI |
|
874 | 874 | pass |
|
875 | 875 | |
|
876 | 876 | def depart_option_string(self, node): |
|
877 | 877 | pass |
|
878 | 878 | |
|
879 | 879 | def visit_option_argument(self, node): |
|
880 | 880 | self.context[-3] = '.BI' # bold/italic alternate |
|
881 | 881 | if node['delimiter'] != ' ': |
|
882 | 882 | self.body.append('\\fB%s ' % node['delimiter']) |
|
883 | 883 | elif self.body[len(self.body)-1].endswith('='): |
|
884 | 884 | # a lone blank means no blank in the output, just a font change |
|
885 | 885 | self.body.append(' ') |
|
886 | 886 | else: |
|
887 | 887 | # blank backslash blank, switch font then a blank |
|
888 | 888 | self.body.append(' \\ ') |
|
889 | 889 | |
|
890 | 890 | def depart_option_argument(self, node): |
|
891 | 891 | pass |
|
892 | 892 | |
|
893 | 893 | def visit_organization(self, node): |
|
894 | 894 | self.visit_docinfo_item(node, 'organization') |
|
895 | 895 | |
|
896 | 896 | def depart_organization(self, node): |
|
897 | 897 | pass |
|
898 | 898 | |
|
899 | 899 | def visit_paragraph(self, node): |
|
900 | 900 | # ``.PP`` : Start standard indented paragraph. |
|
901 | 901 | # ``.LP`` : Start block paragraph, all except the first. |
|
902 | 902 | # ``.P [type]`` : Start paragraph type. |
|
903 | 903 | # NOTE don't use paragraph starts because they reset the indentation. |
|
904 | 904 | # ``.sp`` is only vertical space |
|
905 | 905 | self.ensure_eol() |
|
906 | 906 | self.body.append('.sp\n') |
|
907 | 907 | |
|
908 | 908 | def depart_paragraph(self, node): |
|
909 | 909 | self.body.append('\n') |
|
910 | 910 | |
|
911 | 911 | def visit_problematic(self, node): |
|
912 | 912 | self.body.append(self.defs['problematic'][0]) |
|
913 | 913 | |
|
914 | 914 | def depart_problematic(self, node): |
|
915 | 915 | self.body.append(self.defs['problematic'][1]) |
|
916 | 916 | |
|
917 | 917 | def visit_raw(self, node): |
|
918 | 918 | if node.get('format') == 'manpage': |
|
919 | 919 | self.body.append(node.astext() + "\n") |
|
920 | 920 | # Keep non-manpage raw text out of output: |
|
921 | 921 | raise nodes.SkipNode |
|
922 | 922 | |
|
923 | 923 | def visit_reference(self, node): |
|
924 | 924 | """E.g. link or email address.""" |
|
925 | 925 | self.body.append(self.defs['reference'][0]) |
|
926 | 926 | |
|
927 | 927 | def depart_reference(self, node): |
|
928 | 928 | self.body.append(self.defs['reference'][1]) |
|
929 | 929 | |
|
930 | 930 | def visit_revision(self, node): |
|
931 | 931 | self.visit_docinfo_item(node, 'revision') |
|
932 | 932 | |
|
933 | 933 | depart_revision = depart_docinfo_item |
|
934 | 934 | |
|
935 | 935 | def visit_row(self, node): |
|
936 | 936 | self._active_table.new_row() |
|
937 | 937 | |
|
938 | 938 | def depart_row(self, node): |
|
939 | 939 | pass |
|
940 | 940 | |
|
941 | 941 | def visit_section(self, node): |
|
942 | 942 | self.section_level += 1 |
|
943 | 943 | |
|
944 | 944 | def depart_section(self, node): |
|
945 | 945 | self.section_level -= 1 |
|
946 | 946 | |
|
947 | 947 | def visit_status(self, node): |
|
948 | 948 | self.visit_docinfo_item(node, 'status') |
|
949 | 949 | |
|
950 | 950 | depart_status = depart_docinfo_item |
|
951 | 951 | |
|
952 | 952 | def visit_strong(self, node): |
|
953 | 953 | self.body.append(self.defs['strong'][0]) |
|
954 | 954 | |
|
955 | 955 | def depart_strong(self, node): |
|
956 | 956 | self.body.append(self.defs['strong'][1]) |
|
957 | 957 | |
|
958 | 958 | def visit_substitution_definition(self, node): |
|
959 | 959 | """Internal only.""" |
|
960 | 960 | raise nodes.SkipNode |
|
961 | 961 | |
|
962 | 962 | def visit_substitution_reference(self, node): |
|
963 | 963 | self.document.reporter.warning('"substitution_reference" not supported', |
|
964 | 964 | base_node=node) |
|
965 | 965 | |
|
966 | 966 | def visit_subtitle(self, node): |
|
967 | 967 | if isinstance(node.parent, nodes.sidebar): |
|
968 | 968 | self.body.append(self.defs['strong'][0]) |
|
969 | 969 | elif isinstance(node.parent, nodes.document): |
|
970 | 970 | self.visit_docinfo_item(node, 'subtitle') |
|
971 | 971 | elif isinstance(node.parent, nodes.section): |
|
972 | 972 | self.body.append(self.defs['strong'][0]) |
|
973 | 973 | |
|
974 | 974 | def depart_subtitle(self, node): |
|
975 | 975 | # document subtitle calls SkipNode |
|
976 | 976 | self.body.append(self.defs['strong'][1]+'\n.PP\n') |
|
977 | 977 | |
|
978 | 978 | def visit_system_message(self, node): |
|
979 | 979 | # TODO add report_level |
|
980 | 980 | #if node['level'] < self.document.reporter['writer'].report_level: |
|
981 | 981 | # Level is too low to display: |
|
982 | 982 | # raise nodes.SkipNode |
|
983 | 983 | attr = {} |
|
984 | 984 | backref_text = '' |
|
985 | 985 | if node.hasattr('id'): |
|
986 | 986 | attr['name'] = node['id'] |
|
987 | 987 | if node.hasattr('line'): |
|
988 | 988 | line = ', line %s' % node['line'] |
|
989 | 989 | else: |
|
990 | 990 | line = '' |
|
991 | 991 | self.body.append('.IP "System Message: %s/%s (%s:%s)"\n' |
|
992 | 992 | % (node['type'], node['level'], node['source'], line)) |
|
993 | 993 | |
|
994 | 994 | def depart_system_message(self, node): |
|
995 | 995 | pass |
|
996 | 996 | |
|
997 | 997 | def visit_table(self, node): |
|
998 | 998 | self._active_table = Table() |
|
999 | 999 | |
|
1000 | 1000 | def depart_table(self, node): |
|
1001 | 1001 | self.ensure_eol() |
|
1002 | 1002 | self.body.extend(self._active_table.as_list()) |
|
1003 | 1003 | self._active_table = None |
|
1004 | 1004 | |
|
1005 | 1005 | def visit_target(self, node): |
|
1006 | 1006 | # targets are in-document hyperlink targets, of no use in man pages. |
|
1007 | 1007 | raise nodes.SkipNode |
|
1008 | 1008 | |
|
1009 | 1009 | def visit_tbody(self, node): |
|
1010 | 1010 | pass |
|
1011 | 1011 | |
|
1012 | 1012 | def depart_tbody(self, node): |
|
1013 | 1013 | pass |
|
1014 | 1014 | |
|
1015 | 1015 | def visit_term(self, node): |
|
1016 | 1016 | self.body.append(self.defs['term'][0]) |
|
1017 | 1017 | |
|
1018 | 1018 | def depart_term(self, node): |
|
1019 | 1019 | self.body.append(self.defs['term'][1]) |
|
1020 | 1020 | |
|
1021 | 1021 | def visit_tgroup(self, node): |
|
1022 | 1022 | pass |
|
1023 | 1023 | |
|
1024 | 1024 | def depart_tgroup(self, node): |
|
1025 | 1025 | pass |
|
1026 | 1026 | |
|
1027 | 1027 | def visit_thead(self, node): |
|
1028 | 1028 | # MAYBE double line '=' |
|
1029 | 1029 | pass |
|
1030 | 1030 | |
|
1031 | 1031 | def depart_thead(self, node): |
|
1032 | 1032 | # MAYBE double line '=' |
|
1033 | 1033 | pass |
|
1034 | 1034 | |
|
1035 | 1035 | def visit_tip(self, node): |
|
1036 | 1036 | self.visit_admonition(node, 'tip') |
|
1037 | 1037 | |
|
1038 | 1038 | depart_tip = depart_admonition |
|
1039 | 1039 | |
|
1040 | 1040 | def visit_title(self, node): |
|
1041 | 1041 | if isinstance(node.parent, nodes.topic): |
|
1042 | 1042 | self.body.append(self.defs['topic-title'][0]) |
|
1043 | 1043 | elif isinstance(node.parent, nodes.sidebar): |
|
1044 | 1044 | self.body.append(self.defs['sidebar-title'][0]) |
|
1045 | 1045 | elif isinstance(node.parent, nodes.admonition): |
|
1046 | 1046 | self.body.append('.IP "') |
|
1047 | 1047 | elif self.section_level == 0: |
|
1048 | 1048 | self._docinfo['title'] = node.astext() |
|
1049 | 1049 | # document title for .TH |
|
1050 | 1050 | self._docinfo['title_upper'] = node.astext().upper() |
|
1051 | 1051 | raise nodes.SkipNode |
|
1052 | 1052 | elif self.section_level == 1: |
|
1053 | 1053 | self.body.append('.SH ') |
|
1054 | 1054 | for n in node.traverse(nodes.Text): |
|
1055 | 1055 | n.parent.replace(n, nodes.Text(n.astext().upper())) |
|
1056 | 1056 | else: |
|
1057 | 1057 | self.body.append('.SS ') |
|
1058 | 1058 | |
|
1059 | 1059 | def depart_title(self, node): |
|
1060 | 1060 | if isinstance(node.parent, nodes.admonition): |
|
1061 | 1061 | self.body.append('"') |
|
1062 | 1062 | self.body.append('\n') |
|
1063 | 1063 | |
|
1064 | 1064 | def visit_title_reference(self, node): |
|
1065 | 1065 | """inline citation reference""" |
|
1066 | 1066 | self.body.append(self.defs['title_reference'][0]) |
|
1067 | 1067 | |
|
1068 | 1068 | def depart_title_reference(self, node): |
|
1069 | 1069 | self.body.append(self.defs['title_reference'][1]) |
|
1070 | 1070 | |
|
1071 | 1071 | def visit_topic(self, node): |
|
1072 | 1072 | pass |
|
1073 | 1073 | |
|
1074 | 1074 | def depart_topic(self, node): |
|
1075 | 1075 | pass |
|
1076 | 1076 | |
|
1077 | 1077 | def visit_sidebar(self, node): |
|
1078 | 1078 | pass |
|
1079 | 1079 | |
|
1080 | 1080 | def depart_sidebar(self, node): |
|
1081 | 1081 | pass |
|
1082 | 1082 | |
|
1083 | 1083 | def visit_rubric(self, node): |
|
1084 | 1084 | pass |
|
1085 | 1085 | |
|
1086 | 1086 | def depart_rubric(self, node): |
|
1087 | 1087 | pass |
|
1088 | 1088 | |
|
1089 | 1089 | def visit_transition(self, node): |
|
1090 | 1090 | # .PP Begin a new paragraph and reset prevailing indent. |
|
1091 | 1091 | # .sp N leaves N lines of blank space. |
|
1092 | 1092 | # .ce centers the next line |
|
1093 | 1093 | self.body.append('\n.sp\n.ce\n----\n') |
|
1094 | 1094 | |
|
1095 | 1095 | def depart_transition(self, node): |
|
1096 | 1096 | self.body.append('\n.ce 0\n.sp\n') |
|
1097 | 1097 | |
|
1098 | 1098 | def visit_version(self, node): |
|
1099 | 1099 | self.visit_docinfo_item(node, 'version') |
|
1100 | 1100 | |
|
1101 | 1101 | def visit_warning(self, node): |
|
1102 | 1102 | self.visit_admonition(node, 'warning') |
|
1103 | 1103 | |
|
1104 | 1104 | depart_warning = depart_admonition |
|
1105 | 1105 | |
|
1106 | 1106 | def unimplemented_visit(self, node): |
|
1107 | 1107 | raise NotImplementedError('visiting unimplemented node type: %s' |
|
1108 | 1108 | % node.__class__.__name__) |
|
1109 | 1109 | |
|
1110 | 1110 | # vim: set fileencoding=utf-8 et ts=4 ai : |
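Usage note (not part of the diff above): the writer plugs into the standard docutils publisher. Below is a minimal sketch; the reStructuredText input is a made-up example, and the subtitle line becomes the NAME description while the docinfo fields feed the .TH header built in Translator.header().

# minimal rst2man-style driver (sketch; the sample input is hypothetical)
from docutils.core import publish_string
from docutils.writers import manpage

rst_source = """\
hello
=====

---------------
a greeting tool
---------------

:Author: someone@example.org
:Date: 2009-08-31
:Manual section: 1
"""

# publish_string() parses the source and walks the document tree with
# Translator; the result is troff text beginning with the .TH header.
troff = publish_string(source=rst_source, writer=manpage.Writer())
print troff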
@@ -1,470 +1,470 @@
|
1 | 1 | # convcmd - convert extension commands definition |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from common import NoRepo, MissingTool, SKIPREV, mapfile |
|
9 | 9 | from cvs import convert_cvs |
|
10 | 10 | from darcs import darcs_source |
|
11 | 11 | from git import convert_git |
|
12 | 12 | from hg import mercurial_source, mercurial_sink |
|
13 | 13 | from subversion import svn_source, svn_sink |
|
14 | 14 | from monotone import monotone_source |
|
15 | 15 | from gnuarch import gnuarch_source |
|
16 | 16 | from bzr import bzr_source |
|
17 | 17 | from p4 import p4_source |
|
18 | 18 | import filemap, common |
|
19 | 19 | |
|
20 | 20 | import os, shutil |
|
21 | 21 | from mercurial import hg, util, encoding |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | |
|
24 | 24 | orig_encoding = 'ascii' |
|
25 | 25 | |
|
26 | 26 | def recode(s): |
|
27 | 27 | if isinstance(s, unicode): |
|
28 | 28 | return s.encode(orig_encoding, 'replace') |
|
29 | 29 | else: |
|
30 | 30 | return s.decode('utf-8').encode(orig_encoding, 'replace') |
|
31 | 31 | |
|
32 | 32 | source_converters = [ |
|
33 | 33 | ('cvs', convert_cvs, 'branchsort'), |
|
34 | 34 | ('git', convert_git, 'branchsort'), |
|
35 | 35 | ('svn', svn_source, 'branchsort'), |
|
36 | 36 | ('hg', mercurial_source, 'sourcesort'), |
|
37 | 37 | ('darcs', darcs_source, 'branchsort'), |
|
38 | 38 | ('mtn', monotone_source, 'branchsort'), |
|
39 | 39 | ('gnuarch', gnuarch_source, 'branchsort'), |
|
40 | 40 | ('bzr', bzr_source, 'branchsort'), |
|
41 | 41 | ('p4', p4_source, 'branchsort'), |
|
42 | 42 | ] |
|
43 | 43 | |
|
44 | 44 | sink_converters = [ |
|
45 | 45 | ('hg', mercurial_sink), |
|
46 | 46 | ('svn', svn_sink), |
|
47 | 47 | ] |
|
48 | 48 | |
|
49 | 49 | def convertsource(ui, path, type, rev): |
|
50 | 50 | exceptions = [] |
|
51 | 51 | if type and type not in [s[0] for s in source_converters]: |
|
52 | 52 | raise util.Abort(_('%s: invalid source repository type') % type) |
|
53 | 53 | for name, source, sortmode in source_converters: |
|
54 | 54 | try: |
|
55 | 55 | if not type or name == type: |
|
56 | 56 | return source(ui, path, rev), sortmode |
|
57 | 57 | except (NoRepo, MissingTool), inst: |
|
58 | 58 | exceptions.append(inst) |
|
59 | 59 | if not ui.quiet: |
|
60 | 60 | for inst in exceptions: |
|
61 | 61 | ui.write("%s\n" % inst) |
|
62 | 62 | raise util.Abort(_('%s: missing or unsupported repository') % path) |
|
63 | 63 | |
|
64 | 64 | def convertsink(ui, path, type): |
|
65 | 65 | if type and type not in [s[0] for s in sink_converters]: |
|
66 | 66 | raise util.Abort(_('%s: invalid destination repository type') % type) |
|
67 | 67 | for name, sink in sink_converters: |
|
68 | 68 | try: |
|
69 | 69 | if not type or name == type: |
|
70 | 70 | return sink(ui, path) |
|
71 | 71 | except NoRepo, inst: |
|
72 | 72 | ui.note(_("convert: %s\n") % inst) |
|
73 | 73 | except MissingTool, inst: |
|
74 | 74 | raise util.Abort('%s\n' % inst) |
|
75 | 75 | raise util.Abort(_('%s: unknown repository type') % path) |
|
76 | 76 | |
|
77 | 77 | class progresssource(object): |
|
78 | 78 | def __init__(self, ui, source, filecount): |
|
79 | 79 | self.ui = ui |
|
80 | 80 | self.source = source |
|
81 | 81 | self.filecount = filecount |
|
82 | 82 | self.retrieved = 0 |
|
83 | 83 | |
|
84 | 84 | def getfile(self, file, rev): |
|
85 | 85 | self.retrieved += 1 |
|
86 | 86 | self.ui.progress(_('getting files'), self.retrieved, |
|
87 | 87 | item=file, total=self.filecount) |
|
88 | 88 | return self.source.getfile(file, rev) |
|
89 | 89 | |
|
90 | 90 | def lookuprev(self, rev): |
|
91 | 91 | return self.source.lookuprev(rev) |
|
92 | 92 | |
|
93 | 93 | def close(self): |
|
94 | 94 | self.ui.progress(_('getting files'), None) |
|
95 | 95 | |
|
96 | 96 | class converter(object): |
|
97 | 97 | def __init__(self, ui, source, dest, revmapfile, opts): |
|
98 | 98 | |
|
99 | 99 | self.source = source |
|
100 | 100 | self.dest = dest |
|
101 | 101 | self.ui = ui |
|
102 | 102 | self.opts = opts |
|
103 | 103 | self.commitcache = {} |
|
104 | 104 | self.authors = {} |
|
105 | 105 | self.authorfile = None |
|
106 | 106 | |
|
107 | 107 | # Record converted revisions persistently: maps source revision |
|
108 | 108 | # ID to target revision ID (both strings). (This is how |
|
109 | 109 | # incremental conversions work.) |
|
110 | 110 | self.map = mapfile(ui, revmapfile) |
|
111 | 111 | |
|
112 | 112 | # First read the destination author map, if any |
|
113 | 113 | authorfile = self.dest.authorfile() |
|
114 | 114 | if authorfile and os.path.exists(authorfile): |
|
115 | 115 | self.readauthormap(authorfile) |
|
116 | 116 | # Extend/Override with new author map if necessary |
|
117 | 117 | if opts.get('authormap'): |
|
118 | 118 | self.readauthormap(opts.get('authormap')) |
|
119 | 119 | self.authorfile = self.dest.authorfile() |
|
120 | 120 | |
|
121 | 121 | self.splicemap = common.parsesplicemap(opts.get('splicemap')) |
|
122 | 122 | self.branchmap = mapfile(ui, opts.get('branchmap')) |
|
123 | 123 | |
|
124 | 124 | def walktree(self, heads): |
|
125 | 125 | '''Return a mapping that identifies the uncommitted parents of every |
|
126 | 126 | uncommitted changeset.''' |
|
127 | 127 | visit = heads |
|
128 | 128 | known = set() |
|
129 | 129 | parents = {} |
|
130 | 130 | while visit: |
|
131 | 131 | n = visit.pop(0) |
|
132 | 132 | if n in known or n in self.map: |
|
133 | 133 | continue |
|
134 | 134 | known.add(n) |
|
135 | 135 | self.ui.progress(_('scanning'), len(known), unit=_('revisions')) |
|
136 | 136 | commit = self.cachecommit(n) |
|
137 | 137 | parents[n] = [] |
|
138 | 138 | for p in commit.parents: |
|
139 | 139 | parents[n].append(p) |
|
140 | 140 | visit.append(p) |
|
141 | 141 | self.ui.progress(_('scanning'), None) |
|
142 | 142 | |
|
143 | 143 | return parents |
|
144 | 144 | |
|
145 | 145 | def mergesplicemap(self, parents, splicemap): |
|
146 | 146 | """A splicemap redefines child/parent relationships. Check the |
|
147 | 147 | map contains valid revision identifiers and merge the new |
|
148 | 148 | links in the source graph. |
|
149 | 149 | """ |
|
150 | 150 | for c in splicemap: |
|
151 | 151 | if c not in parents: |
|
152 | 152 | if not self.dest.hascommit(self.map.get(c, c)): |
|
153 | 153 | # Could be in source but not converted during this run |
|
154 | 154 | self.ui.warn(_('splice map revision %s is not being ' |
|
155 | 155 | 'converted, ignoring\n') % c) |
|
156 | 156 | continue |
|
157 | 157 | pc = [] |
|
158 | 158 | for p in splicemap[c]: |
|
159 | 159 | # We do not have to wait for nodes already in dest. |
|
160 | 160 | if self.dest.hascommit(self.map.get(p, p)): |
|
161 | 161 | continue |
|
162 | 162 | # Parent is not in dest and not being converted, not good |
|
163 | 163 | if p not in parents: |
|
164 | 164 | raise util.Abort(_('unknown splice map parent: %s') % p) |
|
165 | 165 | pc.append(p) |
|
166 | 166 | parents[c] = pc |
|
167 | 167 | |
|
168 | 168 | def toposort(self, parents, sortmode): |
|
169 | 169 | '''Return an ordering such that every uncommitted changeset is |
|
170 | 170 | preceded by all its uncommitted ancestors.''' |
|
171 | 171 | |
|
172 | 172 | def mapchildren(parents): |
|
173 | 173 | """Return a (children, roots) tuple where 'children' maps parent |
|
174 | 174 | revision identifiers to their children, and 'roots' is the list of |
|
175 | 175 | revisions without parents. 'parents' must be a mapping of revision |
|
176 | 176 | identifier to its parent identifiers. |
|
177 | 177 | """ |
|
178 | 178 | visit = parents.keys() |
|
179 | 179 | seen = set() |
|
180 | 180 | children = {} |
|
181 | 181 | roots = [] |
|
182 | 182 | |
|
183 | 183 | while visit: |
|
184 | 184 | n = visit.pop(0) |
|
185 | 185 | if n in seen: |
|
186 | 186 | continue |
|
187 | 187 | seen.add(n) |
|
188 | 188 | # Ensure that nodes without parents are present in the |
|
189 | 189 | # 'children' mapping. |
|
190 | 190 | children.setdefault(n, []) |
|
191 | 191 | hasparent = False |
|
192 | 192 | for p in parents[n]: |
|
193 | | if not p in self.map: |
|
| 193 | if p not in self.map: |
|
194 | 194 | visit.append(p) |
|
195 | 195 | hasparent = True |
|
196 | 196 | children.setdefault(p, []).append(n) |
|
197 | 197 | if not hasparent: |
|
198 | 198 | roots.append(n) |
|
199 | 199 | |
|
200 | 200 | return children, roots |
|
201 | 201 | |
|
202 | 202 | # Sort functions are supposed to take a list of revisions which |
|
203 | 203 | # can be converted immediately and pick one |
|
204 | 204 | |
|
205 | 205 | def makebranchsorter(): |
|
206 | 206 | """If the previously converted revision has a child in the |
|
207 | 207 | eligible revisions list, pick it. Return the list head |
|
208 | 208 | otherwise. Branch sort attempts to minimize branch |
|
209 | 209 | switching, which is harmful for Mercurial backend |
|
210 | 210 | compression. |
|
211 | 211 | """ |
|
212 | 212 | prev = [None] |
|
213 | 213 | def picknext(nodes): |
|
214 | 214 | next = nodes[0] |
|
215 | 215 | for n in nodes: |
|
216 | 216 | if prev[0] in parents[n]: |
|
217 | 217 | next = n |
|
218 | 218 | break |
|
219 | 219 | prev[0] = next |
|
220 | 220 | return next |
|
221 | 221 | return picknext |
|
222 | 222 | |
|
223 | 223 | def makesourcesorter(): |
|
224 | 224 | """Source specific sort.""" |
|
225 | 225 | keyfn = lambda n: self.commitcache[n].sortkey |
|
226 | 226 | def picknext(nodes): |
|
227 | 227 | return sorted(nodes, key=keyfn)[0] |
|
228 | 228 | return picknext |
|
229 | 229 | |
|
230 | 230 | def makedatesorter(): |
|
231 | 231 | """Sort revisions by date.""" |
|
232 | 232 | dates = {} |
|
233 | 233 | def getdate(n): |
|
234 | 234 | if n not in dates: |
|
235 | 235 | dates[n] = util.parsedate(self.commitcache[n].date) |
|
236 | 236 | return dates[n] |
|
237 | 237 | |
|
238 | 238 | def picknext(nodes): |
|
239 | 239 | return min([(getdate(n), n) for n in nodes])[1] |
|
240 | 240 | |
|
241 | 241 | return picknext |
|
242 | 242 | |
|
243 | 243 | if sortmode == 'branchsort': |
|
244 | 244 | picknext = makebranchsorter() |
|
245 | 245 | elif sortmode == 'datesort': |
|
246 | 246 | picknext = makedatesorter() |
|
247 | 247 | elif sortmode == 'sourcesort': |
|
248 | 248 | picknext = makesourcesorter() |
|
249 | 249 | else: |
|
250 | 250 | raise util.Abort(_('unknown sort mode: %s') % sortmode) |
|
251 | 251 | |
|
252 | 252 | children, actives = mapchildren(parents) |
|
253 | 253 | |
|
254 | 254 | s = [] |
|
255 | 255 | pendings = {} |
|
256 | 256 | while actives: |
|
257 | 257 | n = picknext(actives) |
|
258 | 258 | actives.remove(n) |
|
259 | 259 | s.append(n) |
|
260 | 260 | |
|
261 | 261 | # Update dependents list |
|
262 | 262 | for c in children.get(n, []): |
|
263 | 263 | if c not in pendings: |
|
264 | 264 | pendings[c] = [p for p in parents[c] if p not in self.map] |
|
265 | 265 | try: |
|
266 | 266 | pendings[c].remove(n) |
|
267 | 267 | except ValueError: |
|
268 | 268 | raise util.Abort(_('cycle detected between %s and %s') |
|
269 | 269 | % (recode(c), recode(n))) |
|
270 | 270 | if not pendings[c]: |
|
271 | 271 | # Parents are converted, node is eligible |
|
272 | 272 | actives.insert(0, c) |
|
273 | 273 | pendings[c] = None |
|
274 | 274 | |
|
275 | 275 | if len(s) != len(parents): |
|
276 | 276 | raise util.Abort(_("not all revisions were sorted")) |
|
277 | 277 | |
|
278 | 278 | return s |
|
279 | 279 | |
|
280 | 280 | def writeauthormap(self): |
|
281 | 281 | authorfile = self.authorfile |
|
282 | 282 | if authorfile: |
|
283 | 283 | self.ui.status(_('Writing author map file %s\n') % authorfile) |
|
284 | 284 | ofile = open(authorfile, 'w+') |
|
285 | 285 | for author in self.authors: |
|
286 | 286 | ofile.write("%s=%s\n" % (author, self.authors[author])) |
|
287 | 287 | ofile.close() |
|
288 | 288 | |
|
289 | 289 | def readauthormap(self, authorfile): |
|
290 | 290 | afile = open(authorfile, 'r') |
|
291 | 291 | for line in afile: |
|
292 | 292 | |
|
293 | 293 | line = line.strip() |
|
294 | 294 | if not line or line.startswith('#'): |
|
295 | 295 | continue |
|
296 | 296 | |
|
297 | 297 | try: |
|
298 | 298 | srcauthor, dstauthor = line.split('=', 1) |
|
299 | 299 | except ValueError: |
|
300 | 300 | msg = _('Ignoring bad line in author map file %s: %s\n') |
|
301 | 301 | self.ui.warn(msg % (authorfile, line.rstrip())) |
|
302 | 302 | continue |
|
303 | 303 | |
|
304 | 304 | srcauthor = srcauthor.strip() |
|
305 | 305 | dstauthor = dstauthor.strip() |
|
306 | 306 | if self.authors.get(srcauthor) in (None, dstauthor): |
|
307 | 307 | msg = _('mapping author %s to %s\n') |
|
308 | 308 | self.ui.debug(msg % (srcauthor, dstauthor)) |
|
309 | 309 | self.authors[srcauthor] = dstauthor |
|
310 | 310 | continue |
|
311 | 311 | |
|
312 | 312 | m = _('overriding mapping for author %s, was %s, will be %s\n') |
|
313 | 313 | self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor)) |
|
314 | 314 | |
|
315 | 315 | afile.close() |
|
316 | 316 | |
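For reference, an author map is a plain-text file of 'source=destination' lines; blank lines and lines starting with '#' are skipped, and since the split happens only at the first '=', the destination half may itself contain '=' characters. A hypothetical example:

    # map VCS login names to full identities
    jdoe = John Doe <jdoe@example.com>
    jane=Jane Roe <jane@example.com>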
|
317 | 317 | def cachecommit(self, rev): |
|
318 | 318 | commit = self.source.getcommit(rev) |
|
319 | 319 | commit.author = self.authors.get(commit.author, commit.author) |
|
320 | 320 | commit.branch = self.branchmap.get(commit.branch, commit.branch) |
|
321 | 321 | self.commitcache[rev] = commit |
|
322 | 322 | return commit |
|
323 | 323 | |
|
324 | 324 | def copy(self, rev): |
|
325 | 325 | commit = self.commitcache[rev] |
|
326 | 326 | |
|
327 | 327 | changes = self.source.getchanges(rev) |
|
328 | 328 | if isinstance(changes, basestring): |
|
329 | 329 | if changes == SKIPREV: |
|
330 | 330 | dest = SKIPREV |
|
331 | 331 | else: |
|
332 | 332 | dest = self.map[changes] |
|
333 | 333 | self.map[rev] = dest |
|
334 | 334 | return |
|
335 | 335 | files, copies = changes |
|
336 | 336 | pbranches = [] |
|
337 | 337 | if commit.parents: |
|
338 | 338 | for prev in commit.parents: |
|
339 | 339 | if prev not in self.commitcache: |
|
340 | 340 | self.cachecommit(prev) |
|
341 | 341 | pbranches.append((self.map[prev], |
|
342 | 342 | self.commitcache[prev].branch)) |
|
343 | 343 | self.dest.setbranch(commit.branch, pbranches) |
|
344 | 344 | try: |
|
345 | 345 | parents = self.splicemap[rev] |
|
346 | 346 | self.ui.status(_('spliced in %s as parents of %s\n') % |
|
347 | 347 | (parents, rev)) |
|
348 | 348 | parents = [self.map.get(p, p) for p in parents] |
|
349 | 349 | except KeyError: |
|
350 | 350 | parents = [b[0] for b in pbranches] |
|
351 | 351 | source = progresssource(self.ui, self.source, len(files)) |
|
352 | 352 | newnode = self.dest.putcommit(files, copies, parents, commit, |
|
353 | 353 | source, self.map) |
|
354 | 354 | source.close() |
|
355 | 355 | self.source.converted(rev, newnode) |
|
356 | 356 | self.map[rev] = newnode |
|
357 | 357 | |
|
358 | 358 | def convert(self, sortmode): |
|
359 | 359 | try: |
|
360 | 360 | self.source.before() |
|
361 | 361 | self.dest.before() |
|
362 | 362 | self.source.setrevmap(self.map) |
|
363 | 363 | self.ui.status(_("scanning source...\n")) |
|
364 | 364 | heads = self.source.getheads() |
|
365 | 365 | parents = self.walktree(heads) |
|
366 | 366 | self.mergesplicemap(parents, self.splicemap) |
|
367 | 367 | self.ui.status(_("sorting...\n")) |
|
368 | 368 | t = self.toposort(parents, sortmode) |
|
369 | 369 | num = len(t) |
|
370 | 370 | c = None |
|
371 | 371 | |
|
372 | 372 | self.ui.status(_("converting...\n")) |
|
373 | 373 | for i, c in enumerate(t): |
|
374 | 374 | num -= 1 |
|
375 | 375 | desc = self.commitcache[c].desc |
|
376 | 376 | if "\n" in desc: |
|
377 | 377 | desc = desc.splitlines()[0] |
|
378 | 378 | # convert log message to local encoding without using |
|
379 | 379 | # tolocal() because the encoding.encoding that convert() |
|
380 | 380 | # uses is 'utf-8' |
|
381 | 381 | self.ui.status("%d %s\n" % (num, recode(desc))) |
|
382 | 382 | self.ui.note(_("source: %s\n") % recode(c)) |
|
383 | 383 | self.ui.progress(_('converting'), i, unit=_('revisions'), |
|
384 | 384 | total=len(t)) |
|
385 | 385 | self.copy(c) |
|
386 | 386 | self.ui.progress(_('converting'), None) |
|
387 | 387 | |
|
388 | 388 | tags = self.source.gettags() |
|
389 | 389 | ctags = {} |
|
390 | 390 | for k in tags: |
|
391 | 391 | v = tags[k] |
|
392 | 392 | if self.map.get(v, SKIPREV) != SKIPREV: |
|
393 | 393 | ctags[k] = self.map[v] |
|
394 | 394 | |
|
395 | 395 | if c and ctags: |
|
396 | 396 | nrev, tagsparent = self.dest.puttags(ctags) |
|
397 | 397 | if nrev and tagsparent: |
|
398 | 398 | # write another hash correspondence to override the previous |
|
399 | 399 | # one so we don't end up with extra tag heads |
|
400 | 400 | tagsparents = [e for e in self.map.iteritems() |
|
401 | 401 | if e[1] == tagsparent] |
|
402 | 402 | if tagsparents: |
|
403 | 403 | self.map[tagsparents[0][0]] = nrev |
|
404 | 404 | |
|
405 | 405 | bookmarks = self.source.getbookmarks() |
|
406 | 406 | cbookmarks = {} |
|
407 | 407 | for k in bookmarks: |
|
408 | 408 | v = bookmarks[k] |
|
409 | 409 | if self.map.get(v, SKIPREV) != SKIPREV: |
|
410 | 410 | cbookmarks[k] = self.map[v] |
|
411 | 411 | |
|
412 | 412 | if c and cbookmarks: |
|
413 | 413 | self.dest.putbookmarks(cbookmarks) |
|
414 | 414 | |
|
415 | 415 | self.writeauthormap() |
|
416 | 416 | finally: |
|
417 | 417 | self.cleanup() |
|
418 | 418 | |
|
419 | 419 | def cleanup(self): |
|
420 | 420 | try: |
|
421 | 421 | self.dest.after() |
|
422 | 422 | finally: |
|
423 | 423 | self.source.after() |
|
424 | 424 | self.map.close() |
|
425 | 425 | |
|
426 | 426 | def convert(ui, src, dest=None, revmapfile=None, **opts): |
|
427 | 427 | global orig_encoding |
|
428 | 428 | orig_encoding = encoding.encoding |
|
429 | 429 | encoding.encoding = 'UTF-8' |
|
430 | 430 | |
|
431 | 431 | # support --authors as an alias for --authormap |
|
432 | 432 | if not opts.get('authormap'): |
|
433 | 433 | opts['authormap'] = opts.get('authors') |
|
434 | 434 | |
|
435 | 435 | if not dest: |
|
436 | 436 | dest = hg.defaultdest(src) + "-hg" |
|
437 | 437 | ui.status(_("assuming destination %s\n") % dest) |
|
438 | 438 | |
|
439 | 439 | destc = convertsink(ui, dest, opts.get('dest_type')) |
|
440 | 440 | |
|
441 | 441 | try: |
|
442 | 442 | srcc, defaultsort = convertsource(ui, src, opts.get('source_type'), |
|
443 | 443 | opts.get('rev')) |
|
444 | 444 | except Exception: |
|
445 | 445 | for path in destc.created: |
|
446 | 446 | shutil.rmtree(path, True) |
|
447 | 447 | raise |
|
448 | 448 | |
|
449 | 449 | sortmodes = ('branchsort', 'datesort', 'sourcesort') |
|
450 | 450 | sortmode = [m for m in sortmodes if opts.get(m)] |
|
451 | 451 | if len(sortmode) > 1: |
|
452 | 452 | raise util.Abort(_('more than one sort mode specified')) |
|
453 | 453 | sortmode = sortmode and sortmode[0] or defaultsort |
|
454 | 454 | if sortmode == 'sourcesort' and not srcc.hasnativeorder(): |
|
455 | 455 | raise util.Abort(_('--sourcesort is not supported by this data source')) |
|
456 | 456 | |
|
457 | 457 | fmap = opts.get('filemap') |
|
458 | 458 | if fmap: |
|
459 | 459 | srcc = filemap.filemap_source(ui, srcc, fmap) |
|
460 | 460 | destc.setfilemapmode(True) |
|
461 | 461 | |
|
462 | 462 | if not revmapfile: |
|
463 | 463 | try: |
|
464 | 464 | revmapfile = destc.revmapfile() |
|
465 | 465 | except: |
|
466 | 466 | revmapfile = os.path.join(destc, "map") |
|
467 | 467 | |
|
468 | 468 | c = converter(ui, srcc, destc, revmapfile, opts) |
|
469 | 469 | c.convert(sortmode) |
|
470 | 470 |
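As a usage note for the entry point above, the three sort modes correspond to mutually exclusive command-line flags of :hg:`convert`; typical invocations look like this (repository paths are hypothetical):

    hg convert --branchsort src-repo converted-hg
    hg convert --datesort src-repo converted-hg
    hg convert --sourcesort src-repo converted-hg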
@@ -1,395 +1,395 b'' | |||
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" causes the source |

16 | 16 | # identifier to be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | |
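As a concrete illustration of the saverev note above (paths hypothetical):

    hg convert --config convert.hg.saverev=true src-repo dst-repo

The converted changesets then carry the source identifier as a convert_revision entry in their extras (see putcommit below), which is why their hashes diverge from a plain conversion.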
|
20 | 20 | |
|
21 | 21 | import os, time, cStringIO |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | from mercurial.node import bin, hex, nullid |
|
24 | 24 | from mercurial import hg, util, context, bookmarks, error |
|
25 | 25 | |
|
26 | 26 | from common import NoRepo, commit, converter_source, converter_sink |
|
27 | 27 | |
|
28 | 28 | class mercurial_sink(converter_sink): |
|
29 | 29 | def __init__(self, ui, path): |
|
30 | 30 | converter_sink.__init__(self, ui, path) |
|
31 | 31 | self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True) |
|
32 | 32 | self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False) |
|
33 | 33 | self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default') |
|
34 | 34 | self.lastbranch = None |
|
35 | 35 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
36 | 36 | try: |
|
37 | 37 | self.repo = hg.repository(self.ui, path) |
|
38 | 38 | if not self.repo.local(): |
|
39 | 39 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
40 | 40 | % path) |
|
41 | 41 | except error.RepoError, err: |
|
42 | 42 | ui.traceback() |
|
43 | 43 | raise NoRepo(err.args[0]) |
|
44 | 44 | else: |
|
45 | 45 | try: |
|
46 | 46 | ui.status(_('initializing destination %s repository\n') % path) |
|
47 | 47 | self.repo = hg.repository(self.ui, path, create=True) |
|
48 | 48 | if not self.repo.local(): |
|
49 | 49 | raise NoRepo(_('%s is not a local Mercurial repository') |
|
50 | 50 | % path) |
|
51 | 51 | self.created.append(path) |
|
52 | 52 | except error.RepoError: |
|
53 | 53 | ui.traceback() |
|
54 | 54 | raise NoRepo(_("could not create hg repository %s as sink") |
|
55 | 55 | % path) |
|
56 | 56 | self.lock = None |
|
57 | 57 | self.wlock = None |
|
58 | 58 | self.filemapmode = False |
|
59 | 59 | |
|
60 | 60 | def before(self): |
|
61 | 61 | self.ui.debug('run hg sink pre-conversion action\n') |
|
62 | 62 | self.wlock = self.repo.wlock() |
|
63 | 63 | self.lock = self.repo.lock() |
|
64 | 64 | |
|
65 | 65 | def after(self): |
|
66 | 66 | self.ui.debug('run hg sink post-conversion action\n') |
|
67 | 67 | if self.lock: |
|
68 | 68 | self.lock.release() |
|
69 | 69 | if self.wlock: |
|
70 | 70 | self.wlock.release() |
|
71 | 71 | |
|
72 | 72 | def revmapfile(self): |
|
73 | 73 | return self.repo.join("shamap") |
|
74 | 74 | |
|
75 | 75 | def authorfile(self): |
|
76 | 76 | return self.repo.join("authormap") |
|
77 | 77 | |
|
78 | 78 | def getheads(self): |
|
79 | 79 | h = self.repo.changelog.heads() |
|
80 | 80 | return [hex(x) for x in h] |
|
81 | 81 | |
|
82 | 82 | def setbranch(self, branch, pbranches): |
|
83 | 83 | if not self.clonebranches: |
|
84 | 84 | return |
|
85 | 85 | |
|
86 | 86 | setbranch = (branch != self.lastbranch) |
|
87 | 87 | self.lastbranch = branch |
|
88 | 88 | if not branch: |
|
89 | 89 | branch = 'default' |
|
90 | 90 | pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches] |
|
91 | 91 | pbranch = pbranches and pbranches[0][1] or 'default' |
|
92 | 92 | |
|
93 | 93 | branchpath = os.path.join(self.path, branch) |
|
94 | 94 | if setbranch: |
|
95 | 95 | self.after() |
|
96 | 96 | try: |
|
97 | 97 | self.repo = hg.repository(self.ui, branchpath) |
|
98 | 98 | except: |
|
99 | 99 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
100 | 100 | self.before() |
|
101 | 101 | |
|
102 | 102 | # pbranches may bring revisions from other branches (merge parents) |
|
103 | 103 | # Make sure we have them, or pull them. |
|
104 | 104 | missings = {} |
|
105 | 105 | for b in pbranches: |
|
106 | 106 | try: |
|
107 | 107 | self.repo.lookup(b[0]) |
|
108 | 108 | except: |
|
109 | 109 | missings.setdefault(b[1], []).append(b[0]) |
|
110 | 110 | |
|
111 | 111 | if missings: |
|
112 | 112 | self.after() |
|
113 | 113 | for pbranch, heads in missings.iteritems(): |
|
114 | 114 | pbranchpath = os.path.join(self.path, pbranch) |
|
115 | 115 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
116 | 116 | self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch)) |
|
117 | 117 | self.repo.pull(prepo, [prepo.lookup(h) for h in heads]) |
|
118 | 118 | self.before() |
|
119 | 119 | |
|
120 | 120 | def _rewritetags(self, source, revmap, data): |
|
121 | 121 | fp = cStringIO.StringIO() |
|
122 | 122 | for line in data.splitlines(): |
|
123 | 123 | s = line.split(' ', 1) |
|
124 | 124 | if len(s) != 2: |
|
125 | 125 | continue |
|
126 | 126 | revid = revmap.get(source.lookuprev(s[0])) |
|
127 | 127 | if not revid: |
|
128 | 128 | continue |
|
129 | 129 | fp.write('%s %s\n' % (revid, s[1])) |
|
130 | 130 | return fp.getvalue() |
|
131 | 131 | |
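Each line of .hgtags pairs a node id with a tag name; the helper above looks the node up in the source, maps it through revmap, and silently drops tags whose target was never converted. A self-contained sketch of the same rewrite, with made-up short ids:

    import io

    def rewritetags(lookuprev, revmap, data):
        fp = io.StringIO()
        for line in data.splitlines():
            s = line.split(' ', 1)
            if len(s) != 2:
                continue                 # malformed line, skip it
            revid = revmap.get(lookuprev(s[0]))
            if not revid:
                continue                 # tag target was not converted
            fp.write('%s %s\n' % (revid, s[1]))
        return fp.getvalue()

    print(rewritetags(lambda r: r, {'aaa': '111'}, 'aaa v1.0\nbbb v2.0'))
    # '111 v1.0\n' -- v2.0 is dropped: 'bbb' has no converted counterpart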
|
132 | 132 | def putcommit(self, files, copies, parents, commit, source, revmap): |
|
133 | 133 | |
|
134 | 134 | files = dict(files) |
|
135 | 135 | def getfilectx(repo, memctx, f): |
|
136 | 136 | v = files[f] |
|
137 | 137 | data, mode = source.getfile(f, v) |
|
138 | 138 | if f == '.hgtags': |
|
139 | 139 | data = self._rewritetags(source, revmap, data) |
|
140 | 140 | return context.memfilectx(f, data, 'l' in mode, 'x' in mode, |
|
141 | 141 | copies.get(f)) |
|
142 | 142 | |
|
143 | 143 | pl = [] |
|
144 | 144 | for p in parents: |
|
145 | 145 | if p not in pl: |
|
146 | 146 | pl.append(p) |
|
147 | 147 | parents = pl |
|
148 | 148 | nparents = len(parents) |
|
149 | 149 | if self.filemapmode and nparents == 1: |
|
150 | 150 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
151 | 151 | parent = parents[0] |
|
152 | 152 | |
|
153 | 153 | if len(parents) < 2: |
|
154 | 154 | parents.append(nullid) |
|
155 | 155 | if len(parents) < 2: |
|
156 | 156 | parents.append(nullid) |
|
157 | 157 | p2 = parents.pop(0) |
|
158 | 158 | |
|
159 | 159 | text = commit.desc |
|
160 | 160 | extra = commit.extra.copy() |
|
161 | 161 | if self.branchnames and commit.branch: |
|
162 | 162 | extra['branch'] = commit.branch |
|
163 | 163 | if commit.rev: |
|
164 | 164 | extra['convert_revision'] = commit.rev |
|
165 | 165 | |
|
166 | 166 | while parents: |
|
167 | 167 | p1 = p2 |
|
168 | 168 | p2 = parents.pop(0) |
|
169 | 169 | ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), |
|
170 | 170 | getfilectx, commit.author, commit.date, extra) |
|
171 | 171 | self.repo.commitctx(ctx) |
|
172 | 172 | text = "(octopus merge fixup)\n" |
|
173 | 173 | p2 = hex(self.repo.changelog.tip()) |
|
174 | 174 | |
|
175 | 175 | if self.filemapmode and nparents == 1: |
|
176 | 176 | man = self.repo.manifest |
|
177 | 177 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
178 | 178 | closed = 'close' in commit.extra |
|
179 | 179 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
180 | 180 | self.ui.status(_("filtering out empty revision\n")) |
|
181 | 181 | self.repo.rollback(force=True) |
|
182 | 182 | return parent |
|
183 | 183 | return p2 |
|
184 | 184 | |
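Mercurial changesets carry at most two parents, so the loop above folds an N-parent source commit into a chain of two-parent merges: the first merge keeps the real message, the rest are committed as '(octopus merge fixup)' placeholders. A schematic of the pairing (illustrative only, not the sink's API):

    def fold_parents(parents, commit_pair):
        # commit_pair((p1, p2)) records one two-parent merge, returns its id
        parents = list(parents)
        while len(parents) < 2:
            parents.append(None)         # stand-in for the nullid padding
        p2 = parents.pop(0)
        tip = None
        while parents:
            p1, p2 = p2, parents.pop(0)
            tip = commit_pair((p1, p2))  # later passes are fixup merges
            p2 = tip
        return tip

    log = []
    fold_parents(['a', 'b', 'c', 'd'],
                 lambda pair: log.append(pair) or 'm%d' % len(log))
    print(log)   # [('a', 'b'), ('m1', 'c'), ('m2', 'd')]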
|
185 | 185 | def puttags(self, tags): |
|
186 | 186 | try: |
|
187 | 187 | parentctx = self.repo[self.tagsbranch] |
|
188 | 188 | tagparent = parentctx.node() |
|
189 | 189 | except error.RepoError: |
|
190 | 190 | parentctx = None |
|
191 | 191 | tagparent = nullid |
|
192 | 192 | |
|
193 | 193 | try: |
|
194 | 194 | oldlines = sorted(parentctx['.hgtags'].data().splitlines(True)) |
|
195 | 195 | except: |
|
196 | 196 | oldlines = [] |
|
197 | 197 | |
|
198 | 198 | newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
199 | 199 | if newlines == oldlines: |
|
200 | 200 | return None, None |
|
201 | 201 | data = "".join(newlines) |
|
202 | 202 | def getfilectx(repo, memctx, f): |
|
203 | 203 | return context.memfilectx(f, data, False, False, None) |
|
204 | 204 | |
|
205 | 205 | self.ui.status(_("updating tags\n")) |
|
206 | 206 | date = "%s 0" % int(time.mktime(time.gmtime())) |
|
207 | 207 | extra = {'branch': self.tagsbranch} |
|
208 | 208 | ctx = context.memctx(self.repo, (tagparent, None), "update tags", |
|
209 | 209 | [".hgtags"], getfilectx, "convert-repo", date, |
|
210 | 210 | extra) |
|
211 | 211 | self.repo.commitctx(ctx) |
|
212 | 212 | return hex(self.repo.changelog.tip()), hex(tagparent) |
|
213 | 213 | |
|
214 | 214 | def setfilemapmode(self, active): |
|
215 | 215 | self.filemapmode = active |
|
216 | 216 | |
|
217 | 217 | def putbookmarks(self, updatedbookmark): |
|
218 | 218 | if not len(updatedbookmark): |
|
219 | 219 | return |
|
220 | 220 | |
|
221 | 221 | self.ui.status(_("updating bookmarks\n")) |
|
222 | 222 | for bookmark in updatedbookmark: |
|
223 | 223 | self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark]) |
|
224 | 224 | bookmarks.write(self.repo) |
|
225 | 225 | |
|
226 | 226 | def hascommit(self, rev): |
|
227 |     | if not rev in self.repo and self.clonebranches: |

    | 227 | if rev not in self.repo and self.clonebranches: |
|
228 | 228 | raise util.Abort(_('revision %s not found in destination ' |
|
229 | 229 | 'repository (lookups with clonebranches=true ' |
|
230 | 230 | 'are not implemented)') % rev) |
|
231 | 231 | return rev in self.repo |
|
232 | 232 | |
|
233 | 233 | class mercurial_source(converter_source): |
|
234 | 234 | def __init__(self, ui, path, rev=None): |
|
235 | 235 | converter_source.__init__(self, ui, path, rev) |
|
236 | 236 | self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False) |
|
237 | 237 | self.ignored = set() |
|
238 | 238 | self.saverev = ui.configbool('convert', 'hg.saverev', False) |
|
239 | 239 | try: |
|
240 | 240 | self.repo = hg.repository(self.ui, path) |
|
241 | 241 | # try to provoke an exception if this isn't really a hg |
|
242 | 242 | # repo, but some other bogus compatible-looking url |
|
243 | 243 | if not self.repo.local(): |
|
244 | 244 | raise error.RepoError() |
|
245 | 245 | except error.RepoError: |
|
246 | 246 | ui.traceback() |
|
247 | 247 | raise NoRepo(_("%s is not a local Mercurial repository") % path) |
|
248 | 248 | self.lastrev = None |
|
249 | 249 | self.lastctx = None |
|
250 | 250 | self._changescache = None |
|
251 | 251 | self.convertfp = None |
|
252 | 252 | # Restrict converted revisions to startrev descendants |
|
253 | 253 | startnode = ui.config('convert', 'hg.startrev') |
|
254 | 254 | if startnode is not None: |
|
255 | 255 | try: |
|
256 | 256 | startnode = self.repo.lookup(startnode) |
|
257 | 257 | except error.RepoError: |
|
258 | 258 | raise util.Abort(_('%s is not a valid start revision') |
|
259 | 259 | % startnode) |
|
260 | 260 | startrev = self.repo.changelog.rev(startnode) |
|
261 | 261 | children = {startnode: 1} |
|
262 | 262 | for rev in self.repo.changelog.descendants(startrev): |
|
263 | 263 | children[self.repo.changelog.node(rev)] = 1 |
|
264 | 264 | self.keep = children.__contains__ |
|
265 | 265 | else: |
|
266 | 266 | self.keep = util.always |
|
267 | 267 | |
|
268 | 268 | def changectx(self, rev): |
|
269 | 269 | if self.lastrev != rev: |
|
270 | 270 | self.lastctx = self.repo[rev] |
|
271 | 271 | self.lastrev = rev |
|
272 | 272 | return self.lastctx |
|
273 | 273 | |
|
274 | 274 | def parents(self, ctx): |
|
275 | 275 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
276 | 276 | |
|
277 | 277 | def getheads(self): |
|
278 | 278 | if self.rev: |
|
279 | 279 | heads = [self.repo[self.rev].node()] |
|
280 | 280 | else: |
|
281 | 281 | heads = self.repo.heads() |
|
282 | 282 | return [hex(h) for h in heads if self.keep(h)] |
|
283 | 283 | |
|
284 | 284 | def getfile(self, name, rev): |
|
285 | 285 | try: |
|
286 | 286 | fctx = self.changectx(rev)[name] |
|
287 | 287 | return fctx.data(), fctx.flags() |
|
288 | 288 | except error.LookupError, err: |
|
289 | 289 | raise IOError(err) |
|
290 | 290 | |
|
291 | 291 | def getchanges(self, rev): |
|
292 | 292 | ctx = self.changectx(rev) |
|
293 | 293 | parents = self.parents(ctx) |
|
294 | 294 | if not parents: |
|
295 | 295 | files = sorted(ctx.manifest()) |
|
296 | 296 | # getcopies() is not needed for roots, but it is a simple way to |
|
297 | 297 | # detect missing revlogs and abort on errors or populate |
|
298 | 298 | # self.ignored |
|
299 | 299 | self.getcopies(ctx, parents, files) |
|
300 | 300 | return [(f, rev) for f in files if f not in self.ignored], {} |
|
301 | 301 | if self._changescache and self._changescache[0] == rev: |
|
302 | 302 | m, a, r = self._changescache[1] |
|
303 | 303 | else: |
|
304 | 304 | m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3] |
|
305 | 305 | # getcopies() detects missing revlogs early, run it before |
|
306 | 306 | # filtering the changes. |
|
307 | 307 | copies = self.getcopies(ctx, parents, m + a) |
|
308 | 308 | changes = [(name, rev) for name in m + a + r |
|
309 | 309 | if name not in self.ignored] |
|
310 | 310 | return sorted(changes), copies |
|
311 | 311 | |
|
312 | 312 | def getcopies(self, ctx, parents, files): |
|
313 | 313 | copies = {} |
|
314 | 314 | for name in files: |
|
315 | 315 | if name in self.ignored: |
|
316 | 316 | continue |
|
317 | 317 | try: |
|
318 | 318 | copysource, copynode = ctx.filectx(name).renamed() |
|
319 | 319 | if copysource in self.ignored or not self.keep(copynode): |
|
320 | 320 | continue |
|
321 | 321 | # Ignore copy sources not in parent revisions |
|
322 | 322 | found = False |
|
323 | 323 | for p in parents: |
|
324 | 324 | if copysource in p: |
|
325 | 325 | found = True |
|
326 | 326 | break |
|
327 | 327 | if not found: |
|
328 | 328 | continue |
|
329 | 329 | copies[name] = copysource |
|
330 | 330 | except TypeError: |
|
331 | 331 | pass |
|
332 | 332 | except error.LookupError, e: |
|
333 | 333 | if not self.ignoreerrors: |
|
334 | 334 | raise |
|
335 | 335 | self.ignored.add(name) |
|
336 | 336 | self.ui.warn(_('ignoring: %s\n') % e) |
|
337 | 337 | return copies |
|
338 | 338 | |
|
339 | 339 | def getcommit(self, rev): |
|
340 | 340 | ctx = self.changectx(rev) |
|
341 | 341 | parents = [p.hex() for p in self.parents(ctx)] |
|
342 | 342 | if self.saverev: |
|
343 | 343 | crev = rev |
|
344 | 344 | else: |
|
345 | 345 | crev = None |
|
346 | 346 | return commit(author=ctx.user(), |
|
347 | 347 | date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'), |
|
348 | 348 | desc=ctx.description(), rev=crev, parents=parents, |
|
349 | 349 | branch=ctx.branch(), extra=ctx.extra(), |
|
350 | 350 | sortkey=ctx.rev()) |
|
351 | 351 | |
|
352 | 352 | def gettags(self): |
|
353 | 353 | tags = [t for t in self.repo.tagslist() if t[0] != 'tip'] |
|
354 | 354 | return dict([(name, hex(node)) for name, node in tags |
|
355 | 355 | if self.keep(node)]) |
|
356 | 356 | |
|
357 | 357 | def getchangedfiles(self, rev, i): |
|
358 | 358 | ctx = self.changectx(rev) |
|
359 | 359 | parents = self.parents(ctx) |
|
360 | 360 | if not parents and i is None: |
|
361 | 361 | i = 0 |
|
362 | 362 | changes = [], ctx.manifest().keys(), [] |
|
363 | 363 | else: |
|
364 | 364 | i = i or 0 |
|
365 | 365 | changes = self.repo.status(parents[i].node(), ctx.node())[:3] |
|
366 | 366 | changes = [[f for f in l if f not in self.ignored] for l in changes] |
|
367 | 367 | |
|
368 | 368 | if i == 0: |
|
369 | 369 | self._changescache = (rev, changes) |
|
370 | 370 | |
|
371 | 371 | return changes[0] + changes[1] + changes[2] |
|
372 | 372 | |
|
373 | 373 | def converted(self, rev, destrev): |
|
374 | 374 | if self.convertfp is None: |
|
375 | 375 | self.convertfp = open(self.repo.join('shamap'), 'a') |
|
376 | 376 | self.convertfp.write('%s %s\n' % (destrev, rev)) |
|
377 | 377 | self.convertfp.flush() |
|
378 | 378 | |
|
379 | 379 | def before(self): |
|
380 | 380 | self.ui.debug('run hg source pre-conversion action\n') |
|
381 | 381 | |
|
382 | 382 | def after(self): |
|
383 | 383 | self.ui.debug('run hg source post-conversion action\n') |
|
384 | 384 | |
|
385 | 385 | def hasnativeorder(self): |
|
386 | 386 | return True |
|
387 | 387 | |
|
388 | 388 | def lookuprev(self, rev): |
|
389 | 389 | try: |
|
390 | 390 | return hex(self.repo.lookup(rev)) |
|
391 | 391 | except error.RepoError: |
|
392 | 392 | return None |
|
393 | 393 | |
|
394 | 394 | def getbookmarks(self): |
|
395 | 395 | return bookmarks.listbookmarks(self.repo) |
@@ -1,329 +1,329 b'' | |||
|
1 | 1 | # extdiff.py - external diff program support for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to allow external programs to compare revisions |
|
9 | 9 | |
|
10 | 10 | The extdiff Mercurial extension allows you to use external programs |
|
11 | 11 | to compare revisions, or a revision with the working directory. The external |
|
12 | 12 | diff programs are called with a configurable set of options and two |
|
13 | 13 | non-option arguments: paths to directories containing snapshots of |
|
14 | 14 | files to compare. |
|
15 | 15 | |
|
16 | 16 | The extdiff extension also allows you to configure new diff commands, so |
|
17 | 17 | you do not need to type :hg:`extdiff -p kdiff3` every time. :: |
|
18 | 18 | |
|
19 | 19 | [extdiff] |
|
20 | 20 | # add new command that runs GNU diff(1) in 'context diff' mode |
|
21 | 21 | cdiff = gdiff -Nprc5 |
|
22 | 22 | ## or the old way: |
|
23 | 23 | #cmd.cdiff = gdiff |
|
24 | 24 | #opts.cdiff = -Nprc5 |
|
25 | 25 | |
|
26 | 26 | # add new command called vdiff, runs kdiff3 |
|
27 | 27 | vdiff = kdiff3 |
|
28 | 28 | |
|
29 | 29 | # add new command called meld, runs meld (no need to name twice) |
|
30 | 30 | meld = |
|
31 | 31 | |
|
32 | 32 | # add new command called vimdiff, runs gvimdiff with DirDiff plugin |
|
33 | 33 | # (see http://www.vim.org/scripts/script.php?script_id=102); |

34 | 34 | # non-English users should put "let g:DirDiffDynamicDiffText = 1" |

35 | 35 | # in their .vimrc |
|
36 | 36 | vimdiff = gvim -f "+next" \\ |
|
37 | 37 | "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))" |
|
38 | 38 | |
|
39 | 39 | Tool arguments can include variables that are expanded at runtime:: |
|
40 | 40 | |
|
41 | 41 | $parent1, $plabel1 - filename, descriptive label of first parent |
|
42 | 42 | $child, $clabel - filename, descriptive label of child revision |
|
43 | 43 | $parent2, $plabel2 - filename, descriptive label of second parent |
|
44 | 44 | $root - repository root |
|
45 | 45 | $parent is an alias for $parent1. |
|
46 | 46 | |
|
47 | 47 | The extdiff extension will look in your [diff-tools] and [merge-tools] |
|
48 | 48 | sections for diff tool arguments, when none are specified in [extdiff]. |
|
49 | 49 | |
|
50 | 50 | :: |
|
51 | 51 | |
|
52 | 52 | [extdiff] |
|
53 | 53 | kdiff3 = |
|
54 | 54 | |
|
55 | 55 | [diff-tools] |
|
56 | 56 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
57 | 57 | |
|
58 | 58 | You can use -I/-X and a list of file or directory names as with the |

59 | 59 | normal :hg:`diff` command. The extdiff extension makes snapshots of only |
|
60 | 60 | needed files, so running the external diff program will actually be |
|
61 | 61 | pretty fast (at least faster than having to compare the entire tree). |
|
62 | 62 | ''' |
|
63 | 63 | |
|
64 | 64 | from mercurial.i18n import _ |
|
65 | 65 | from mercurial.node import short, nullid |
|
66 | 66 | from mercurial import scmutil, util, commands, encoding |
|
67 | 67 | import os, shlex, shutil, tempfile, re |
|
68 | 68 | |
|
69 | 69 | def snapshot(ui, repo, files, node, tmproot): |
|
70 | 70 | '''snapshot files as of some revision |
|
71 | 71 | without snapshots, -I/-X do not work and recursive diffs |

72 | 72 | in tools like kdiff3 and meld display too many files.''' |
|
73 | 73 | dirname = os.path.basename(repo.root) |
|
74 | 74 | if dirname == "": |
|
75 | 75 | dirname = "root" |
|
76 | 76 | if node is not None: |
|
77 | 77 | dirname = '%s.%s' % (dirname, short(node)) |
|
78 | 78 | base = os.path.join(tmproot, dirname) |
|
79 | 79 | os.mkdir(base) |
|
80 | 80 | if node is not None: |
|
81 | 81 | ui.note(_('making snapshot of %d files from rev %s\n') % |
|
82 | 82 | (len(files), short(node))) |
|
83 | 83 | else: |
|
84 | 84 | ui.note(_('making snapshot of %d files from working directory\n') % |
|
85 | 85 | (len(files))) |
|
86 | 86 | wopener = scmutil.opener(base) |
|
87 | 87 | fns_and_mtime = [] |
|
88 | 88 | ctx = repo[node] |
|
89 | 89 | for fn in files: |
|
90 | 90 | wfn = util.pconvert(fn) |
|
91 |    | if not wfn in ctx: |

   | 91 | if wfn not in ctx: |
|
92 | 92 | # File doesn't exist; could be a bogus modify |
|
93 | 93 | continue |
|
94 | 94 | ui.note(' %s\n' % wfn) |
|
95 | 95 | dest = os.path.join(base, wfn) |
|
96 | 96 | fctx = ctx[wfn] |
|
97 | 97 | data = repo.wwritedata(wfn, fctx.data()) |
|
98 | 98 | if 'l' in fctx.flags(): |
|
99 | 99 | wopener.symlink(data, wfn) |
|
100 | 100 | else: |
|
101 | 101 | wopener.write(wfn, data) |
|
102 | 102 | if 'x' in fctx.flags(): |
|
103 | 103 | util.setflags(dest, False, True) |
|
104 | 104 | if node is None: |
|
105 | 105 | fns_and_mtime.append((dest, repo.wjoin(fn), |
|
106 | 106 | os.lstat(dest).st_mtime)) |
|
107 | 107 | return dirname, fns_and_mtime |
|
108 | 108 | |
|
109 | 109 | def dodiff(ui, repo, diffcmd, diffopts, pats, opts): |
|
110 | 110 | '''Do the actual diff: |
|
111 | 111 | |
|
112 | 112 | - copy to a temp structure if diffing 2 internal revisions |
|
113 | 113 | - copy to a temp structure if diffing the working revision against |

114 | 114 | another one when more than one file has changed |
|
115 | 115 | - just invoke the diff for a single file in the working dir |
|
116 | 116 | ''' |
|
117 | 117 | |
|
118 | 118 | revs = opts.get('rev') |
|
119 | 119 | change = opts.get('change') |
|
120 | 120 | args = ' '.join(diffopts) |
|
121 | 121 | do3way = '$parent2' in args |
|
122 | 122 | |
|
123 | 123 | if revs and change: |
|
124 | 124 | msg = _('cannot specify --rev and --change at the same time') |
|
125 | 125 | raise util.Abort(msg) |
|
126 | 126 | elif change: |
|
127 | 127 | node2 = scmutil.revsingle(repo, change, None).node() |
|
128 | 128 | node1a, node1b = repo.changelog.parents(node2) |
|
129 | 129 | else: |
|
130 | 130 | node1a, node2 = scmutil.revpair(repo, revs) |
|
131 | 131 | if not revs: |
|
132 | 132 | node1b = repo.dirstate.p2() |
|
133 | 133 | else: |
|
134 | 134 | node1b = nullid |
|
135 | 135 | |
|
136 | 136 | # Disable 3-way merge if there is only one parent |
|
137 | 137 | if do3way: |
|
138 | 138 | if node1b == nullid: |
|
139 | 139 | do3way = False |
|
140 | 140 | |
|
141 | 141 | matcher = scmutil.match(repo[node2], pats, opts) |
|
142 | 142 | mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3]) |
|
143 | 143 | if do3way: |
|
144 | 144 | mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3]) |
|
145 | 145 | else: |
|
146 | 146 | mod_b, add_b, rem_b = set(), set(), set() |
|
147 | 147 | modadd = mod_a | add_a | mod_b | add_b |
|
148 | 148 | common = modadd | rem_a | rem_b |
|
149 | 149 | if not common: |
|
150 | 150 | return 0 |
|
151 | 151 | |
|
152 | 152 | tmproot = tempfile.mkdtemp(prefix='extdiff.') |
|
153 | 153 | try: |
|
154 | 154 | # Always make a copy of node1a (and node1b, if applicable) |
|
155 | 155 | dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a) |
|
156 | 156 | dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0] |
|
157 | 157 | rev1a = '@%d' % repo[node1a].rev() |
|
158 | 158 | if do3way: |
|
159 | 159 | dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b) |
|
160 | 160 | dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0] |
|
161 | 161 | rev1b = '@%d' % repo[node1b].rev() |
|
162 | 162 | else: |
|
163 | 163 | dir1b = None |
|
164 | 164 | rev1b = '' |
|
165 | 165 | |
|
166 | 166 | fns_and_mtime = [] |
|
167 | 167 | |
|
168 | 168 | # If node2 is not the wc or there is >1 change, copy it |
|
169 | 169 | dir2root = '' |
|
170 | 170 | rev2 = '' |
|
171 | 171 | if node2: |
|
172 | 172 | dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0] |
|
173 | 173 | rev2 = '@%d' % repo[node2].rev() |
|
174 | 174 | elif len(common) > 1: |
|
175 | 175 | # we only actually need to get the files to copy back to |

176 | 176 | # the working dir in this case (because the other cases |

177 | 177 | # are: diffing 2 revisions or single file -- in which case |

178 | 178 | # the file is already directly passed to the diff tool). |
|
179 | 179 | dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot) |
|
180 | 180 | else: |
|
181 | 181 | # This lets the diff tool open the changed file directly |
|
182 | 182 | dir2 = '' |
|
183 | 183 | dir2root = repo.root |
|
184 | 184 | |
|
185 | 185 | label1a = rev1a |
|
186 | 186 | label1b = rev1b |
|
187 | 187 | label2 = rev2 |
|
188 | 188 | |
|
189 | 189 | # If only one change, diff the files instead of the directories |
|
190 | 190 | # Handle bogus modifies correctly by checking if the files exist |
|
191 | 191 | if len(common) == 1: |
|
192 | 192 | common_file = util.localpath(common.pop()) |
|
193 | 193 | dir1a = os.path.join(tmproot, dir1a, common_file) |
|
194 | 194 | label1a = common_file + rev1a |
|
195 | 195 | if not os.path.isfile(dir1a): |
|
196 | 196 | dir1a = os.devnull |
|
197 | 197 | if do3way: |
|
198 | 198 | dir1b = os.path.join(tmproot, dir1b, common_file) |
|
199 | 199 | label1b = common_file + rev1b |
|
200 | 200 | if not os.path.isfile(dir1b): |
|
201 | 201 | dir1b = os.devnull |
|
202 | 202 | dir2 = os.path.join(dir2root, dir2, common_file) |
|
203 | 203 | label2 = common_file + rev2 |
|
204 | 204 | |
|
205 | 205 | # Function to quote file/dir names in the argument string. |
|
206 | 206 | # When not operating in 3-way mode, an empty string is |
|
207 | 207 | # returned for parent2 |
|
208 | 208 | replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b, |
|
209 | 209 | plabel1=label1a, plabel2=label1b, |
|
210 | 210 | clabel=label2, child=dir2, |
|
211 | 211 | root=repo.root) |
|
212 | 212 | def quote(match): |
|
213 | 213 | key = match.group()[1:] |
|
214 | 214 | if not do3way and key == 'parent2': |
|
215 | 215 | return '' |
|
216 | 216 | return util.shellquote(replace[key]) |
|
217 | 217 | |
|
218 | 218 | # Match parent2 first, so 'parent1?' will match both parent1 and parent |
|
219 | 219 | regex = r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)' |
|
220 | 220 | if not do3way and not re.search(regex, args): |
|
221 | 221 | args += ' $parent1 $child' |
|
222 | 222 | args = re.sub(regex, quote, args) |
|
223 | 223 | cmdline = util.shellquote(diffcmd) + ' ' + args |
|
224 | 224 | |
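Matching $parent2 ahead of $parent1? in the pattern above keeps a literal '$parent' from leaving its digit behind, and the callback shell-quotes each substituted value. A standalone illustration of the same expansion (all values hypothetical):

    import re

    replace = {'parent': '/tmp/snap.a', 'parent1': '/tmp/snap.a',
               'parent2': '', 'child': '/tmp/snap.b',
               'plabel1': '@10', 'plabel2': '', 'clabel': '@11',
               'root': '/repo'}

    def quote(match):
        key = match.group()[1:]          # strip the leading '$'
        return "'%s'" % replace[key]     # naive quoting, demo only

    regex = r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
    args = '--L1 $plabel1 --L2 $clabel $parent $child'
    print(re.sub(regex, quote, args))
    # --L1 '@10' --L2 '@11' '/tmp/snap.a' '/tmp/snap.b'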
|
225 | 225 | ui.debug('running %r in %s\n' % (cmdline, tmproot)) |
|
226 | 226 | util.system(cmdline, cwd=tmproot, out=ui.fout) |
|
227 | 227 | |
|
228 | 228 | for copy_fn, working_fn, mtime in fns_and_mtime: |
|
229 | 229 | if os.lstat(copy_fn).st_mtime != mtime: |
|
230 | 230 | ui.debug('file changed while diffing. ' |
|
231 | 231 | 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)) |
|
232 | 232 | util.copyfile(copy_fn, working_fn) |
|
233 | 233 | |
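Because the tool may be handed snapshot copies rather than the working files themselves, edits made inside the diff program would otherwise be lost; the loop above detects them by mtime and copies the snapshot back. The core check, reduced to plain Python:

    import os, shutil

    def copyback(fns_and_mtime):
        # entries: (snapshot_path, working_path, mtime at snapshot time)
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                # user edited the snapshot; propagate to the working copy
                shutil.copyfile(copy_fn, working_fn)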
|
234 | 234 | return 1 |
|
235 | 235 | finally: |
|
236 | 236 | ui.note(_('cleaning up temp directory\n')) |
|
237 | 237 | shutil.rmtree(tmproot) |
|
238 | 238 | |
|
239 | 239 | def extdiff(ui, repo, *pats, **opts): |
|
240 | 240 | '''use external program to diff repository (or selected files) |
|
241 | 241 | |
|
242 | 242 | Show differences between revisions for the specified files, using |
|
243 | 243 | an external program. The default program used is diff, with |
|
244 | 244 | default options "-Npru". |
|
245 | 245 | |
|
246 | 246 | To select a different program, use the -p/--program option. The |
|
247 | 247 | program will be passed the names of two directories to compare. To |
|
248 | 248 | pass additional options to the program, use -o/--option. These |
|
249 | 249 | will be passed before the names of the directories to compare. |
|
250 | 250 | |
|
251 | 251 | When two revision arguments are given, then changes are shown |
|
252 | 252 | between those revisions. If only one revision is specified then |
|
253 | 253 | that revision is compared to the working directory, and, when no |
|
254 | 254 | revisions are specified, the working directory files are compared |
|
255 | 255 | to its parent.''' |
|
256 | 256 | program = opts.get('program') |
|
257 | 257 | option = opts.get('option') |
|
258 | 258 | if not program: |
|
259 | 259 | program = 'diff' |
|
260 | 260 | option = option or ['-Npru'] |
|
261 | 261 | return dodiff(ui, repo, program, option, pats, opts) |
|
262 | 262 | |
|
263 | 263 | cmdtable = { |
|
264 | 264 | "extdiff": |
|
265 | 265 | (extdiff, |
|
266 | 266 | [('p', 'program', '', |
|
267 | 267 | _('comparison program to run'), _('CMD')), |
|
268 | 268 | ('o', 'option', [], |
|
269 | 269 | _('pass option to comparison program'), _('OPT')), |
|
270 | 270 | ('r', 'rev', [], |
|
271 | 271 | _('revision'), _('REV')), |
|
272 | 272 | ('c', 'change', '', |
|
273 | 273 | _('change made by revision'), _('REV')), |
|
274 | 274 | ] + commands.walkopts, |
|
275 | 275 | _('hg extdiff [OPT]... [FILE]...')), |
|
276 | 276 | } |
|
277 | 277 | |
|
278 | 278 | def uisetup(ui): |
|
279 | 279 | for cmd, path in ui.configitems('extdiff'): |
|
280 | 280 | if cmd.startswith('cmd.'): |
|
281 | 281 | cmd = cmd[4:] |
|
282 | 282 | if not path: |
|
283 | 283 | path = cmd |
|
284 | 284 | diffopts = ui.config('extdiff', 'opts.' + cmd, '') |
|
285 | 285 | diffopts = diffopts and [diffopts] or [] |
|
286 | 286 | elif cmd.startswith('opts.'): |
|
287 | 287 | continue |
|
288 | 288 | else: |
|
289 | 289 | # command = path opts |
|
290 | 290 | if path: |
|
291 | 291 | diffopts = shlex.split(path) |
|
292 | 292 | path = diffopts.pop(0) |
|
293 | 293 | else: |
|
294 | 294 | path, diffopts = cmd, [] |
|
295 | 295 | # look for diff arguments in [diff-tools] then [merge-tools] |
|
296 | 296 | if diffopts == []: |
|
297 | 297 | args = ui.config('diff-tools', cmd+'.diffargs') or \ |
|
298 | 298 | ui.config('merge-tools', cmd+'.diffargs') |
|
299 | 299 | if args: |
|
300 | 300 | diffopts = shlex.split(args) |
|
301 | 301 | def save(cmd, path, diffopts): |
|
302 | 302 | '''use closure to save diff command to use''' |
|
303 | 303 | def mydiff(ui, repo, *pats, **opts): |
|
304 | 304 | return dodiff(ui, repo, path, diffopts + opts['option'], |
|
305 | 305 | pats, opts) |
|
306 | 306 | doc = _('''\ |
|
307 | 307 | use %(path)s to diff repository (or selected files) |
|
308 | 308 | |
|
309 | 309 | Show differences between revisions for the specified files, using |
|
310 | 310 | the %(path)s program. |
|
311 | 311 | |
|
312 | 312 | When two revision arguments are given, then changes are shown |
|
313 | 313 | between those revisions. If only one revision is specified then |
|
314 | 314 | that revision is compared to the working directory, and, when no |
|
315 | 315 | revisions are specified, the working directory files are compared |
|
316 | 316 | to its parent.\ |
|
317 | 317 | ''') % dict(path=util.uirepr(path)) |
|
318 | 318 | |
|
319 | 319 | # We must translate the docstring right away since it is |
|
320 | 320 | # used as a format string. The string will unfortunately |
|
321 | 321 | # be translated again in commands.helpcmd and this will |
|
322 | 322 | # fail when the docstring contains non-ASCII characters. |
|
323 | 323 | # Decoding the string to a Unicode string here (using the |
|
324 | 324 | # right encoding) prevents that. |
|
325 | 325 | mydiff.__doc__ = doc.decode(encoding.encoding) |
|
326 | 326 | return mydiff |
|
327 | 327 | cmdtable[cmd] = (save(cmd, path, diffopts), |
|
328 | 328 | cmdtable['extdiff'][1][1:], |
|
329 | 329 | _('hg %s [OPTION]... [FILE]...') % cmd) |
@@ -1,702 +1,702 b'' | |||
|
1 | 1 | # keyword.py - $Keyword$ expansion for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | # |
|
8 | 8 | # $Id$ |
|
9 | 9 | # |
|
10 | 10 | # Keyword expansion hack against the grain of a DSCM |
|
11 | 11 | # |
|
12 | 12 | # There are many good reasons why this is not needed in a distributed |
|
13 | 13 | # SCM; still, it may be useful in very small projects based on single |
|
14 | 14 | # files (like LaTeX packages), that are mostly addressed to an |
|
15 | 15 | # audience not running a version control system. |
|
16 | 16 | # |
|
17 | 17 | # For in-depth discussion refer to |
|
18 | 18 | # <http://mercurial.selenic.com/wiki/KeywordPlan>. |
|
19 | 19 | # |
|
20 | 20 | # Keyword expansion is based on Mercurial's changeset template mappings. |
|
21 | 21 | # |
|
22 | 22 | # Binary files are not touched. |
|
23 | 23 | # |
|
24 | 24 | # Files to act upon/ignore are specified in the [keyword] section. |
|
25 | 25 | # Customized keyword template mappings in the [keywordmaps] section. |
|
26 | 26 | # |
|
27 | 27 | # Run "hg help keyword" and "hg kwdemo" to get info on configuration. |
|
28 | 28 | |
|
29 | 29 | '''expand keywords in tracked files |
|
30 | 30 | |
|
31 | 31 | This extension expands RCS/CVS-like or self-customized $Keywords$ in |
|
32 | 32 | tracked text files selected by your configuration. |
|
33 | 33 | |
|
34 | 34 | Keywords are only expanded in local repositories and not stored in the |
|
35 | 35 | change history. The mechanism can be regarded as a convenience for the |
|
36 | 36 | current user or for archive distribution. |
|
37 | 37 | |
|
38 | 38 | Keywords expand to the changeset data pertaining to the latest change |
|
39 | 39 | relative to the working directory parent of each file. |
|
40 | 40 | |
|
41 | 41 | Configuration is done in the [keyword], [keywordset] and [keywordmaps] |
|
42 | 42 | sections of hgrc files. |
|
43 | 43 | |
|
44 | 44 | Example:: |
|
45 | 45 | |
|
46 | 46 | [keyword] |
|
47 | 47 | # expand keywords in every python file except those matching "x*" |
|
48 | 48 | **.py = |
|
49 | 49 | x* = ignore |
|
50 | 50 | |
|
51 | 51 | [keywordset] |
|
52 | 52 | # prefer svn- over cvs-like default keywordmaps |
|
53 | 53 | svn = True |
|
54 | 54 | |
|
55 | 55 | .. note:: |
|
56 | 56 | The more specific your filename patterns are, the less speed you |

57 | 57 | lose in huge repositories. |
|
58 | 58 | |
|
59 | 59 | For [keywordmaps] template mapping and expansion demonstration and |
|
60 | 60 | control run :hg:`kwdemo`. See :hg:`help templates` for a list of |
|
61 | 61 | available templates and filters. |
|
62 | 62 | |
|
63 | 63 | Three additional date template filters are provided: |
|
64 | 64 | |
|
65 | 65 | :``utcdate``: "2006/09/18 15:13:13" |
|
66 | 66 | :``svnutcdate``: "2006-09-18 15:13:13Z" |
|
67 | 67 | :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)" |
|
68 | 68 | |
|
69 | 69 | The default template mappings (view with :hg:`kwdemo -d`) can be |
|
70 | 70 | replaced with customized keywords and templates. Again, run |
|
71 | 71 | :hg:`kwdemo` to control the results of your configuration changes. |
|
72 | 72 | |
|
73 | 73 | Before changing/disabling active keywords, you must run :hg:`kwshrink` |
|
74 | 74 | to avoid storing expanded keywords in the change history. |
|
75 | 75 | |
|
76 | 76 | To force expansion after enabling it, or a configuration change, run |
|
77 | 77 | :hg:`kwexpand`. |
|
78 | 78 | |
|
79 | 79 | Expansions spanning more than one line and incremental expansions, |
|
80 | 80 | like CVS' $Log$, are not supported. A keyword template map "Log = |
|
81 | 81 | {desc}" expands to the first line of the changeset description. |
|
82 | 82 | ''' |
|
83 | 83 | |
|
84 | 84 | from mercurial import commands, context, cmdutil, dispatch, filelog, extensions |
|
85 | 85 | from mercurial import localrepo, match, patch, templatefilters, templater, util |
|
86 | 86 | from mercurial import scmutil |
|
87 | 87 | from mercurial.hgweb import webcommands |
|
88 | 88 | from mercurial.i18n import _ |
|
89 | 89 | import os, re, shutil, tempfile |
|
90 | 90 | |
|
91 | 91 | commands.optionalrepo += ' kwdemo' |
|
92 | 92 | |
|
93 | 93 | cmdtable = {} |
|
94 | 94 | command = cmdutil.command(cmdtable) |
|
95 | 95 | |
|
96 | 96 | # hg commands that do not act on keywords |
|
97 | 97 | nokwcommands = ('add addremove annotate bundle export grep incoming init log' |
|
98 | 98 | ' outgoing push tip verify convert email glog') |
|
99 | 99 | |
|
100 | 100 | # hg commands that trigger expansion only when writing to working dir, |
|
101 | 101 | # not when reading filelog, and unexpand when reading from working dir |
|
102 | 102 | restricted = 'merge kwexpand kwshrink record qrecord resolve transplant' |
|
103 | 103 | |
|
104 | 104 | # names of extensions using dorecord |
|
105 | 105 | recordextensions = 'record' |
|
106 | 106 | |
|
107 | 107 | colortable = { |
|
108 | 108 | 'kwfiles.enabled': 'green bold', |
|
109 | 109 | 'kwfiles.deleted': 'cyan bold underline', |
|
110 | 110 | 'kwfiles.enabledunknown': 'green', |
|
111 | 111 | 'kwfiles.ignored': 'bold', |
|
112 | 112 | 'kwfiles.ignoredunknown': 'none' |
|
113 | 113 | } |
|
114 | 114 | |
|
115 | 115 | # date like in cvs' $Date |
|
116 | 116 | def utcdate(text): |
|
117 | 117 | ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13". |
|
118 | 118 | ''' |
|
119 | 119 | return util.datestr((text[0], 0), '%Y/%m/%d %H:%M:%S') |
|
120 | 120 | # date like in svn's $Date |
|
121 | 121 | def svnisodate(text): |
|
122 | 122 | ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13 |
|
123 | 123 | +0200 (Tue, 18 Aug 2009)". |
|
124 | 124 | ''' |
|
125 | 125 | return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') |
|
126 | 126 | # date like in svn's $Id |
|
127 | 127 | def svnutcdate(text): |
|
128 | 128 | ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18 |
|
129 | 129 | 11:00:13Z". |
|
130 | 130 | ''' |
|
131 | 131 | return util.datestr((text[0], 0), '%Y-%m-%d %H:%M:%SZ') |
|
132 | 132 | |
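The three filters differ only in the strftime-style format string handed to util.datestr, whose input is a (unixtime, tzoffset) pair; the two UTC variants zero out the offset. A rough plain-Python equivalent of two of them (util.datestr itself is Mercurial's helper):

    import time

    def utcdate(date):
        # date is a (unixtime, tzoffset) pair; drop the offset for UTC
        return time.strftime('%Y/%m/%d %H:%M:%S', time.gmtime(date[0]))

    def svnutcdate(date):
        return time.strftime('%Y-%m-%d %H:%M:%SZ', time.gmtime(date[0]))

    print(utcdate((1158592393, 0)))      # 2006/09/18 15:13:13
    print(svnutcdate((1158592393, 0)))   # 2006-09-18 15:13:13Z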
|
133 | 133 | templatefilters.filters.update({'utcdate': utcdate, |
|
134 | 134 | 'svnisodate': svnisodate, |
|
135 | 135 | 'svnutcdate': svnutcdate}) |
|
136 | 136 | |
|
137 | 137 | # make keyword tools accessible |
|
138 | 138 | kwtools = {'templater': None, 'hgcmd': ''} |
|
139 | 139 | |
|
140 | 140 | def _defaultkwmaps(ui): |
|
141 | 141 | '''Returns default keywordmaps according to keywordset configuration.''' |
|
142 | 142 | templates = { |
|
143 | 143 | 'Revision': '{node|short}', |
|
144 | 144 | 'Author': '{author|user}', |
|
145 | 145 | } |
|
146 | 146 | kwsets = ({ |
|
147 | 147 | 'Date': '{date|utcdate}', |
|
148 | 148 | 'RCSfile': '{file|basename},v', |
|
149 | 149 | 'RCSFile': '{file|basename},v', # kept for backwards compatibility |
|
150 | 150 | # with hg-keyword |
|
151 | 151 | 'Source': '{root}/{file},v', |
|
152 | 152 | 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}', |
|
153 | 153 | 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}', |
|
154 | 154 | }, { |
|
155 | 155 | 'Date': '{date|svnisodate}', |
|
156 | 156 | 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}', |
|
157 | 157 | 'LastChangedRevision': '{node|short}', |
|
158 | 158 | 'LastChangedBy': '{author|user}', |
|
159 | 159 | 'LastChangedDate': '{date|svnisodate}', |
|
160 | 160 | }) |
|
161 | 161 | templates.update(kwsets[ui.configbool('keywordset', 'svn')]) |
|
162 | 162 | return templates |
|
163 | 163 | |
|
164 | 164 | def _shrinktext(text, subfunc): |
|
165 | 165 | '''Helper for keyword expansion removal in text. |
|
166 | 166 | Depending on subfunc, it also returns the number of substitutions.''' |
|
167 | 167 | return subfunc(r'$\1$', text) |
|
168 | 168 | |
|
169 | 169 | def _preselect(wstatus, changed): |
|
170 | 170 | '''Retrieves modified and added files from a working directory state |

171 | 171 | and returns the subset of each contained in the given changed files |
|
172 | 172 | retrieved from a change context.''' |
|
173 | 173 | modified, added = wstatus[:2] |
|
174 | 174 | modified = [f for f in modified if f in changed] |
|
175 | 175 | added = [f for f in added if f in changed] |
|
176 | 176 | return modified, added |
|
177 | 177 | |
|
178 | 178 | |
|
179 | 179 | class kwtemplater(object): |
|
180 | 180 | ''' |
|
181 | 181 | Sets up keyword templates, corresponding keyword regex, and |
|
182 | 182 | provides keyword substitution functions. |
|
183 | 183 | ''' |
|
184 | 184 | |
|
185 | 185 | def __init__(self, ui, repo, inc, exc): |
|
186 | 186 | self.ui = ui |
|
187 | 187 | self.repo = repo |
|
188 | 188 | self.match = match.match(repo.root, '', [], inc, exc) |
|
189 | 189 | self.restrict = kwtools['hgcmd'] in restricted.split() |
|
190 | 190 | self.record = False |
|
191 | 191 | |
|
192 | 192 | kwmaps = self.ui.configitems('keywordmaps') |
|
193 | 193 | if kwmaps: # override default templates |
|
194 | 194 | self.templates = dict((k, templater.parsestring(v, False)) |
|
195 | 195 | for k, v in kwmaps) |
|
196 | 196 | else: |
|
197 | 197 | self.templates = _defaultkwmaps(self.ui) |
|
198 | 198 | |
|
199 | 199 | @util.propertycache |
|
200 | 200 | def escape(self): |
|
201 | 201 | '''Returns bar-separated and escaped keywords.''' |
|
202 | 202 | return '|'.join(map(re.escape, self.templates.keys())) |
|
203 | 203 | |
|
204 | 204 | @util.propertycache |
|
205 | 205 | def rekw(self): |
|
206 | 206 | '''Returns regex for unexpanded keywords.''' |
|
207 | 207 | return re.compile(r'\$(%s)\$' % self.escape) |
|
208 | 208 | |
|
209 | 209 | @util.propertycache |
|
210 | 210 | def rekwexp(self): |
|
211 | 211 | '''Returns regex for expanded keywords.''' |
|
212 | 212 | return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape) |
|
213 | 213 | |
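With, say, three keywords configured, escape yields a pattern like 'Id|Date|Author', so rekw matches dormant keywords ($Id$) while rekwexp matches expanded ones ($Id: ... $); shrinking is just substituting the bare form back, as _shrinktext does below. A self-contained illustration:

    import re

    escaped = '|'.join(map(re.escape, ['Id', 'Date', 'Author']))
    rekw = re.compile(r'\$(%s)\$' % escaped)                  # unexpanded
    rekwexp = re.compile(r'\$(%s): [^$\n\r]*? \$' % escaped)  # expanded

    text = '$Id: hello.py,v deadbeef 2009/08/18 11:00:13 alice $'
    print(rekwexp.sub(r'$\1$', text))            # $Id$
    print(rekw.findall('$Date$ and $Author$'))   # ['Date', 'Author']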
|
214 | 214 | def substitute(self, data, path, ctx, subfunc): |
|
215 | 215 | '''Replaces keywords in data with expanded template.''' |
|
216 | 216 | def kwsub(mobj): |
|
217 | 217 | kw = mobj.group(1) |
|
218 | 218 | ct = cmdutil.changeset_templater(self.ui, self.repo, |
|
219 | 219 | False, None, '', False) |
|
220 | 220 | ct.use_template(self.templates[kw]) |
|
221 | 221 | self.ui.pushbuffer() |
|
222 | 222 | ct.show(ctx, root=self.repo.root, file=path) |
|
223 | 223 | ekw = templatefilters.firstline(self.ui.popbuffer()) |
|
224 | 224 | return '$%s: %s $' % (kw, ekw) |
|
225 | 225 | return subfunc(kwsub, data) |
|
226 | 226 | |
|
227 | 227 | def linkctx(self, path, fileid): |
|
228 | 228 | '''Similar to filelog.linkrev, but returns a changectx.''' |
|
229 | 229 | return self.repo.filectx(path, fileid=fileid).changectx() |
|
230 | 230 | |
|
231 | 231 | def expand(self, path, node, data): |
|
232 | 232 | '''Returns data with keywords expanded.''' |
|
233 | 233 | if not self.restrict and self.match(path) and not util.binary(data): |
|
234 | 234 | ctx = self.linkctx(path, node) |
|
235 | 235 | return self.substitute(data, path, ctx, self.rekw.sub) |
|
236 | 236 | return data |
|
237 | 237 | |
|
238 | 238 | def iskwfile(self, cand, ctx): |
|
239 | 239 | '''Returns subset of candidates which are configured for keyword |
|
240 | 240 | expansion but are not symbolic links.''' |
|
241 |     | return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)] |

    | 241 | return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)] |
|
242 | 242 | |
|
243 | 243 | def overwrite(self, ctx, candidates, lookup, expand, rekw=False): |
|
244 | 244 | '''Overwrites selected files expanding/shrinking keywords.''' |
|
245 | 245 | if self.restrict or lookup or self.record: # exclude kw_copy |
|
246 | 246 | candidates = self.iskwfile(candidates, ctx) |
|
247 | 247 | if not candidates: |
|
248 | 248 | return |
|
249 | 249 | kwcmd = self.restrict and lookup # kwexpand/kwshrink |
|
250 | 250 | if self.restrict or expand and lookup: |
|
251 | 251 | mf = ctx.manifest() |
|
252 | 252 | if self.restrict or rekw: |
|
253 | 253 | re_kw = self.rekw |
|
254 | 254 | else: |
|
255 | 255 | re_kw = self.rekwexp |
|
256 | 256 | if expand: |
|
257 | 257 | msg = _('overwriting %s expanding keywords\n') |
|
258 | 258 | else: |
|
259 | 259 | msg = _('overwriting %s shrinking keywords\n') |
|
260 | 260 | for f in candidates: |
|
261 | 261 | if self.restrict: |
|
262 | 262 | data = self.repo.file(f).read(mf[f]) |
|
263 | 263 | else: |
|
264 | 264 | data = self.repo.wread(f) |
|
265 | 265 | if util.binary(data): |
|
266 | 266 | continue |
|
267 | 267 | if expand: |
|
268 | 268 | if lookup: |
|
269 | 269 | ctx = self.linkctx(f, mf[f]) |
|
270 | 270 | data, found = self.substitute(data, f, ctx, re_kw.subn) |
|
271 | 271 | elif self.restrict: |
|
272 | 272 | found = re_kw.search(data) |
|
273 | 273 | else: |
|
274 | 274 | data, found = _shrinktext(data, re_kw.subn) |
|
275 | 275 | if found: |
|
276 | 276 | self.ui.note(msg % f) |
|
277 | 277 | fp = self.repo.wopener(f, "wb", atomictemp=True) |
|
278 | 278 | fp.write(data) |
|
279 | 279 | fp.close() |
|
280 | 280 | if kwcmd: |
|
281 | 281 | self.repo.dirstate.normal(f) |
|
282 | 282 | elif self.record: |
|
283 | 283 | self.repo.dirstate.normallookup(f) |
|
284 | 284 | |
|
285 | 285 | def shrink(self, fname, text): |
|
286 | 286 | '''Returns text with all keyword substitutions removed.''' |
|
287 | 287 | if self.match(fname) and not util.binary(text): |
|
288 | 288 | return _shrinktext(text, self.rekwexp.sub) |
|
289 | 289 | return text |
|
290 | 290 | |
|
291 | 291 | def shrinklines(self, fname, lines): |
|
292 | 292 | '''Returns lines with keyword substitutions removed.''' |
|
293 | 293 | if self.match(fname): |
|
294 | 294 | text = ''.join(lines) |
|
295 | 295 | if not util.binary(text): |
|
296 | 296 | return _shrinktext(text, self.rekwexp.sub).splitlines(True) |
|
297 | 297 | return lines |
|
298 | 298 | |
|
299 | 299 | def wread(self, fname, data): |
|
300 | 300 | '''If in restricted mode returns data read from wdir with |
|
301 | 301 | keyword substitutions removed.''' |
|
302 | 302 | if self.restrict: |
|
303 | 303 | return self.shrink(fname, data) |
|
304 | 304 | return data |
|
305 | 305 | |
|
306 | 306 | class kwfilelog(filelog.filelog): |
|
307 | 307 | ''' |
|
308 | 308 | Subclass of filelog to hook into its read, add, cmp methods. |
|
309 | 309 | Keywords are "stored" unexpanded, and processed on reading. |
|
310 | 310 | ''' |
|
311 | 311 | def __init__(self, opener, kwt, path): |
|
312 | 312 | super(kwfilelog, self).__init__(opener, path) |
|
313 | 313 | self.kwt = kwt |
|
314 | 314 | self.path = path |
|
315 | 315 | |
|
316 | 316 | def read(self, node): |
|
317 | 317 | '''Expands keywords when reading filelog.''' |
|
318 | 318 | data = super(kwfilelog, self).read(node) |
|
319 | 319 | if self.renamed(node): |
|
320 | 320 | return data |
|
321 | 321 | return self.kwt.expand(self.path, node, data) |
|
322 | 322 | |
|
323 | 323 | def add(self, text, meta, tr, link, p1=None, p2=None): |
|
324 | 324 | '''Removes keyword substitutions when adding to filelog.''' |
|
325 | 325 | text = self.kwt.shrink(self.path, text) |
|
326 | 326 | return super(kwfilelog, self).add(text, meta, tr, link, p1, p2) |
|
327 | 327 | |
|
328 | 328 | def cmp(self, node, text): |
|
329 | 329 | '''Removes keyword substitutions for comparison.''' |
|
330 | 330 | text = self.kwt.shrink(self.path, text) |
|
331 | 331 | return super(kwfilelog, self).cmp(node, text) |
|
332 | 332 | |
|
333 | 333 | def _status(ui, repo, wctx, kwt, *pats, **opts): |
|
334 | 334 | '''Bails out if [keyword] configuration is not active. |
|
335 | 335 | Returns status of working directory.''' |
|
336 | 336 | if kwt: |
|
337 | 337 | return repo.status(match=scmutil.match(wctx, pats, opts), clean=True, |
|
338 | 338 | unknown=opts.get('unknown') or opts.get('all')) |
|
339 | 339 | if ui.configitems('keyword'): |
|
340 | 340 | raise util.Abort(_('[keyword] patterns cannot match')) |
|
341 | 341 | raise util.Abort(_('no [keyword] patterns configured')) |
|
342 | 342 | |
|
343 | 343 | def _kwfwrite(ui, repo, expand, *pats, **opts): |
|
344 | 344 | '''Selects files and passes them to kwtemplater.overwrite.''' |
|
345 | 345 | wctx = repo[None] |
|
346 | 346 | if len(wctx.parents()) > 1: |
|
347 | 347 | raise util.Abort(_('outstanding uncommitted merge')) |
|
348 | 348 | kwt = kwtools['templater'] |
|
349 | 349 | wlock = repo.wlock() |
|
350 | 350 | try: |
|
351 | 351 | status = _status(ui, repo, wctx, kwt, *pats, **opts) |
|
352 | 352 | modified, added, removed, deleted, unknown, ignored, clean = status |
|
353 | 353 | if modified or added or removed or deleted: |
|
354 | 354 | raise util.Abort(_('outstanding uncommitted changes')) |
|
355 | 355 | kwt.overwrite(wctx, clean, True, expand) |
|
356 | 356 | finally: |
|
357 | 357 | wlock.release() |
|
358 | 358 | |
|
359 | 359 | @command('kwdemo', |
|
360 | 360 | [('d', 'default', None, _('show default keyword template maps')), |
|
361 | 361 | ('f', 'rcfile', '', |
|
362 | 362 | _('read maps from rcfile'), _('FILE'))], |
|
363 | 363 | _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')) |
|
364 | 364 | def demo(ui, repo, *args, **opts): |
|
365 | 365 | '''print [keywordmaps] configuration and an expansion example |
|
366 | 366 | |
|
367 | 367 | Show current, custom, or default keyword template maps and their |
|
368 | 368 | expansions. |
|
369 | 369 | |
|
370 | 370 | Extend the current configuration by specifying maps as arguments |
|
371 | 371 | and using -f/--rcfile to source an external hgrc file. |
|
372 | 372 | |
|
373 | 373 | Use -d/--default to disable current configuration. |
|
374 | 374 | |
|
375 | 375 | See :hg:`help templates` for information on templates and filters. |
|
376 | 376 | ''' |
|
377 | 377 | def demoitems(section, items): |
|
378 | 378 | ui.write('[%s]\n' % section) |
|
379 | 379 | for k, v in sorted(items): |
|
380 | 380 | ui.write('%s = %s\n' % (k, v)) |
|
381 | 381 | |
|
382 | 382 | fn = 'demo.txt' |
|
383 | 383 | tmpdir = tempfile.mkdtemp('', 'kwdemo.') |
|
384 | 384 | ui.note(_('creating temporary repository at %s\n') % tmpdir) |
|
385 | 385 | repo = localrepo.localrepository(ui, tmpdir, True) |
|
386 | 386 | ui.setconfig('keyword', fn, '') |
|
387 | 387 | svn = ui.configbool('keywordset', 'svn') |
|
388 | 388 | # explicitly set keywordset for demo output |
|
389 | 389 | ui.setconfig('keywordset', 'svn', svn) |
|
390 | 390 | |
|
391 | 391 | uikwmaps = ui.configitems('keywordmaps') |
|
392 | 392 | if args or opts.get('rcfile'): |
|
393 | 393 | ui.status(_('\n\tconfiguration using custom keyword template maps\n')) |
|
394 | 394 | if uikwmaps: |
|
395 | 395 | ui.status(_('\textending current template maps\n')) |
|
396 | 396 | if opts.get('default') or not uikwmaps: |
|
397 | 397 | if svn: |
|
398 | 398 | ui.status(_('\toverriding default svn keywordset\n')) |
|
399 | 399 | else: |
|
400 | 400 | ui.status(_('\toverriding default cvs keywordset\n')) |
|
401 | 401 | if opts.get('rcfile'): |
|
402 | 402 | ui.readconfig(opts.get('rcfile')) |
|
403 | 403 | if args: |
|
404 | 404 | # simulate hgrc parsing |
|
405 | 405 | rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args] |
|
406 | 406 | fp = repo.opener('hgrc', 'w') |
|
407 | 407 | fp.writelines(rcmaps) |
|
408 | 408 | fp.close() |
|
409 | 409 | ui.readconfig(repo.join('hgrc')) |
|
410 | 410 | kwmaps = dict(ui.configitems('keywordmaps')) |
|
411 | 411 | elif opts.get('default'): |
|
412 | 412 | if svn: |
|
413 | 413 | ui.status(_('\n\tconfiguration using default svn keywordset\n')) |
|
414 | 414 | else: |
|
415 | 415 | ui.status(_('\n\tconfiguration using default cvs keywordset\n')) |
|
416 | 416 | kwmaps = _defaultkwmaps(ui) |
|
417 | 417 | if uikwmaps: |
|
418 | 418 | ui.status(_('\tdisabling current template maps\n')) |
|
419 | 419 | for k, v in kwmaps.iteritems(): |
|
420 | 420 | ui.setconfig('keywordmaps', k, v) |
|
421 | 421 | else: |
|
422 | 422 | ui.status(_('\n\tconfiguration using current keyword template maps\n')) |
|
423 | 423 | if uikwmaps: |
|
424 | 424 | kwmaps = dict(uikwmaps) |
|
425 | 425 | else: |
|
426 | 426 | kwmaps = _defaultkwmaps(ui) |
|
427 | 427 | |
|
428 | 428 | uisetup(ui) |
|
429 | 429 | reposetup(ui, repo) |
|
430 | 430 | ui.write('[extensions]\nkeyword =\n') |
|
431 | 431 | demoitems('keyword', ui.configitems('keyword')) |
|
432 | 432 | demoitems('keywordset', ui.configitems('keywordset')) |
|
433 | 433 | demoitems('keywordmaps', kwmaps.iteritems()) |
|
434 | 434 | keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n' |
|
435 | 435 | repo.wopener.write(fn, keywords) |
|
436 | 436 | repo[None].add([fn]) |
|
437 | 437 | ui.note(_('\nkeywords written to %s:\n') % fn) |
|
438 | 438 | ui.note(keywords) |
|
439 | 439 | repo.dirstate.setbranch('demobranch') |
|
440 | 440 | for name, cmd in ui.configitems('hooks'): |
|
441 | 441 | if name.split('.', 1)[0].find('commit') > -1: |
|
442 | 442 | repo.ui.setconfig('hooks', name, '') |
|
443 | 443 | msg = _('hg keyword configuration and expansion example') |
|
444 | 444 | ui.note("hg ci -m '%s'\n" % msg) |
|
445 | 445 | repo.commit(text=msg) |
|
446 | 446 | ui.status(_('\n\tkeywords expanded\n')) |
|
447 | 447 | ui.write(repo.wread(fn)) |
|
448 | 448 | shutil.rmtree(tmpdir, ignore_errors=True) |
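The demo.txt content written above follows directly from the join on the sorted map keys; a tiny worked instance (template values elided):

    kwmaps = {'Id': '...', 'Author': '...'}   # template values elided
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    assert keywords == '$Author$\n$Id$\n'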
|
449 | 449 | |
|
450 | 450 | @command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...')) |
|
451 | 451 | def expand(ui, repo, *pats, **opts): |
|
452 | 452 | '''expand keywords in the working directory |
|
453 | 453 | |
|
454 | 454 | Run after (re)enabling keyword expansion. |
|
455 | 455 | |
|
456 | 456 | kwexpand refuses to run if given files contain local changes. |
|
457 | 457 | ''' |
|
458 | 458 | # 3rd argument sets expansion to True |
|
459 | 459 | _kwfwrite(ui, repo, True, *pats, **opts) |
|
460 | 460 | |
|
461 | 461 | @command('kwfiles', |
|
462 | 462 | [('A', 'all', None, _('show keyword status flags of all files')), |
|
463 | 463 | ('i', 'ignore', None, _('show files excluded from expansion')), |
|
464 | 464 | ('u', 'unknown', None, _('only show unknown (not tracked) files')), |
|
465 | 465 | ] + commands.walkopts, |
|
466 | 466 | _('hg kwfiles [OPTION]... [FILE]...')) |
|
467 | 467 | def files(ui, repo, *pats, **opts): |
|
468 | 468 | '''show files configured for keyword expansion |
|
469 | 469 | |
|
470 | 470 | List which files in the working directory are matched by the |
|
471 | 471 | [keyword] configuration patterns. |
|
472 | 472 | |
|
473 | 473 | Useful to prevent inadvertent keyword expansion and to speed up |
|
474 | 474 | execution by including only files that are actual candidates for |
|
475 | 475 | expansion. |
|
476 | 476 | |
|
477 | 477 | See :hg:`help keyword` on how to construct patterns both for |
|
478 | 478 | inclusion and exclusion of files. |
|
479 | 479 | |
|
480 | 480 | With -A/--all and -v/--verbose the codes used to show the status |
|
481 | 481 | of files are:: |
|
482 | 482 | |
|
483 | 483 | K = keyword expansion candidate |
|
484 | 484 | k = keyword expansion candidate (not tracked) |
|
485 | 485 | I = ignored |
|
486 | 486 | i = ignored (not tracked) |
|
487 | 487 | ''' |
|
488 | 488 | kwt = kwtools['templater'] |
|
489 | 489 | wctx = repo[None] |
|
490 | 490 | status = _status(ui, repo, wctx, kwt, *pats, **opts) |
|
491 | 491 | cwd = pats and repo.getcwd() or '' |
|
492 | 492 | modified, added, removed, deleted, unknown, ignored, clean = status |
|
493 | 493 | files = [] |
|
494 | 494 | if not opts.get('unknown') or opts.get('all'): |
|
495 | 495 | files = sorted(modified + added + clean) |
|
496 | 496 | kwfiles = kwt.iskwfile(files, wctx) |
|
497 | 497 | kwdeleted = kwt.iskwfile(deleted, wctx) |
|
498 | 498 | kwunknown = kwt.iskwfile(unknown, wctx) |
|
499 | 499 | if not opts.get('ignore') or opts.get('all'): |
|
500 | 500 | showfiles = kwfiles, kwdeleted, kwunknown |
|
501 | 501 | else: |
|
502 | 502 | showfiles = [], [], [] |
|
503 | 503 | if opts.get('all') or opts.get('ignore'): |
|
504 | 504 | showfiles += ([f for f in files if f not in kwfiles], |
|
505 | 505 | [f for f in unknown if f not in kwunknown]) |
|
506 | 506 | kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split() |
|
507 | 507 | kwstates = zip('K!kIi', showfiles, kwlabels) |
|
508 | 508 | for char, filenames, kwstate in kwstates: |
|
509 | 509 | fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n' |
|
510 | 510 | for f in filenames: |
|
511 | 511 | ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate) |
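The code/label pairing in the loop above, run on made-up data, shows which file group each status character belongs to:

    # showfiles mirrors the 5-tuple built above: kw-tracked, kw-deleted,
    # kw-untracked, ignored-tracked, ignored-untracked (sample data)
    showfiles = (['a.py'], [], ['b.py'], ['c.txt'], [])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    for char, filenames, label in zip('K!kIi', showfiles, kwlabels):
        for f in filenames:
            print('%s %s' % (char, f))   # labeled kwfiles.<label> in the ui
    # K a.py
    # k b.py
    # I c.txt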
|
512 | 512 | |
|
513 | 513 | @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...')) |
|
514 | 514 | def shrink(ui, repo, *pats, **opts): |
|
515 | 515 | '''revert expanded keywords in the working directory |
|
516 | 516 | |
|
517 | 517 | Must be run before changing/disabling active keywords. |
|
518 | 518 | |
|
519 | 519 | kwshrink refuses to run if given files contain local changes. |
|
520 | 520 | ''' |
|
521 | 521 | # 3rd argument sets expansion to False |
|
522 | 522 | _kwfwrite(ui, repo, False, *pats, **opts) |
|
523 | 523 | |
|
524 | 524 | |
|
525 | 525 | def uisetup(ui): |
|
526 | 526 | '''Monkeypatches dispatch._parse to retrieve the user command.'''
|
527 | 527 | |
|
528 | 528 | def kwdispatch_parse(orig, ui, args): |
|
529 | 529 | '''Monkeypatch dispatch._parse to obtain running hg command.''' |
|
530 | 530 | cmd, func, args, options, cmdoptions = orig(ui, args) |
|
531 | 531 | kwtools['hgcmd'] = cmd |
|
532 | 532 | return cmd, func, args, options, cmdoptions |
|
533 | 533 | |
|
534 | 534 | extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse) |
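extensions.wrapfunction is the mechanism behind every monkeypatch in this extension; it is roughly equivalent to the following toy sketch (not the Mercurial implementation itself):

    def wrapfunction(container, funcname, wrapper):
        # replace container.funcname with a closure that receives the
        # original callable as its first argument
        origfn = getattr(container, funcname)
        def wrap(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, funcname, wrap)
        return origfn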
|
535 | 535 | |
|
536 | 536 | def reposetup(ui, repo): |
|
537 | 537 | '''Sets up repo as kwrepo for keyword substitution. |
|
538 | 538 | Overrides file method to return kwfilelog instead of filelog |
|
539 | 539 | if file matches user configuration. |
|
540 | 540 | Wraps commit to overwrite configured files with updated |
|
541 | 541 | keyword substitutions. |
|
542 | 542 | Monkeypatches patch and webcommands.''' |
|
543 | 543 | |
|
544 | 544 | try: |
|
545 | 545 | if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split() |
|
546 | 546 | or '.hg' in util.splitpath(repo.root) |
|
547 | 547 | or repo._url.startswith('bundle:')): |
|
548 | 548 | return |
|
549 | 549 | except AttributeError: |
|
550 | 550 | pass |
|
551 | 551 | |
|
552 | 552 | inc, exc = [], ['.hg*'] |
|
553 | 553 | for pat, opt in ui.configitems('keyword'): |
|
554 | 554 | if opt != 'ignore': |
|
555 | 555 | inc.append(pat) |
|
556 | 556 | else: |
|
557 | 557 | exc.append(pat) |
|
558 | 558 | if not inc: |
|
559 | 559 | return |
|
560 | 560 | |
|
561 | 561 | kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc) |
|
562 | 562 | |
|
563 | 563 | class kwrepo(repo.__class__): |
|
564 | 564 | def file(self, f): |
|
565 | 565 | if f[0] == '/': |
|
566 | 566 | f = f[1:] |
|
567 | 567 | return kwfilelog(self.sopener, kwt, f) |
|
568 | 568 | |
|
569 | 569 | def wread(self, filename): |
|
570 | 570 | data = super(kwrepo, self).wread(filename) |
|
571 | 571 | return kwt.wread(filename, data) |
|
572 | 572 | |
|
573 | 573 | def commit(self, *args, **opts): |
|
574 | 574 | # use custom commitctx for user commands |
|
575 | 575 | # other extensions can still wrap repo.commitctx directly |
|
576 | 576 | self.commitctx = self.kwcommitctx |
|
577 | 577 | try: |
|
578 | 578 | return super(kwrepo, self).commit(*args, **opts) |
|
579 | 579 | finally: |
|
580 | 580 | del self.commitctx |
|
581 | 581 | |
|
582 | 582 | def kwcommitctx(self, ctx, error=False): |
|
583 | 583 | n = super(kwrepo, self).commitctx(ctx, error) |
|
584 | 584 | # no lock needed, only called from repo.commit() which already locks |
|
585 | 585 | if not kwt.record: |
|
586 | 586 | restrict = kwt.restrict |
|
587 | 587 | kwt.restrict = True |
|
588 | 588 | kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()), |
|
589 | 589 | False, True) |
|
590 | 590 | kwt.restrict = restrict |
|
591 | 591 | return n |
|
592 | 592 | |
|
593 | 593 | def rollback(self, dryrun=False, force=False): |
|
594 | 594 | wlock = self.wlock() |
|
595 | 595 | try: |
|
596 | 596 | if not dryrun: |
|
597 | 597 | changed = self['.'].files() |
|
598 | 598 | ret = super(kwrepo, self).rollback(dryrun, force) |
|
599 | 599 | if not dryrun: |
|
600 | 600 | ctx = self['.'] |
|
601 | 601 | modified, added = _preselect(self[None].status(), changed) |
|
602 | 602 | kwt.overwrite(ctx, modified, True, True) |
|
603 | 603 | kwt.overwrite(ctx, added, True, False) |
|
604 | 604 | return ret |
|
605 | 605 | finally: |
|
606 | 606 | wlock.release() |
|
607 | 607 | |
|
608 | 608 | # monkeypatches |
|
609 | 609 | def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None): |
|
610 | 610 | '''Monkeypatch/wrap patch.patchfile.__init__ to avoid |
|
611 | 611 | rejects or conflicts due to expanded keywords in working dir.''' |
|
612 | 612 | orig(self, ui, gp, backend, store, eolmode) |
|
613 | 613 | # shrink keywords read from working dir |
|
614 | 614 | self.lines = kwt.shrinklines(self.fname, self.lines) |
|
615 | 615 | |
|
616 | 616 | def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None, |
|
617 | 617 | opts=None, prefix=''): |
|
618 | 618 | '''Monkeypatch patch.diff to avoid expansion.''' |
|
619 | 619 | kwt.restrict = True |
|
620 | 620 | return orig(repo, node1, node2, match, changes, opts, prefix) |
|
621 | 621 | |
|
622 | 622 | def kwweb_skip(orig, web, req, tmpl): |
|
623 | 623 | '''Wraps webcommands.x turning off keyword expansion.''' |
|
624 | 624 | kwt.match = util.never |
|
625 | 625 | return orig(web, req, tmpl) |
|
626 | 626 | |
|
627 | 627 | def kw_copy(orig, ui, repo, pats, opts, rename=False): |
|
628 | 628 | '''Wraps cmdutil.copy so that copy/rename destinations do not |
|
629 | 629 | contain expanded keywords. |
|
630 | 630 | Note that the source of a regular file destination may also be a |
|
631 | 631 | symlink: |
|
632 | 632 | hg cp sym x -> x is symlink |
|
633 | 633 | cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords) |
|
634 | 634 | For the latter we have to follow the symlink to find out whether its |
|
635 | 635 | target is configured for expansion and we therefore must unexpand the |
|
636 | 636 | keywords in the destination.''' |
|
637 | 637 | orig(ui, repo, pats, opts, rename) |
|
638 | 638 | if opts.get('dry_run'): |
|
639 | 639 | return |
|
640 | 640 | wctx = repo[None] |
|
641 | 641 | cwd = repo.getcwd() |
|
642 | 642 | |
|
643 | 643 | def haskwsource(dest): |
|
644 | 644 | '''Returns true if dest is a regular file and configured for |
|
645 | 645 | expansion or a symlink which points to a file configured for |
|
646 | 646 | expansion. ''' |
|
647 | 647 | source = repo.dirstate.copied(dest) |
|
648 | 648 | if 'l' in wctx.flags(source): |
|
649 | 649 | source = scmutil.canonpath(repo.root, cwd, |
|
650 | 650 | os.path.realpath(source)) |
|
651 | 651 | return kwt.match(source) |
|
652 | 652 | |
|
653 | 653 | candidates = [f for f in repo.dirstate.copies() if |
|
654 |     | not 'l' in wctx.flags(f) and haskwsource(f)]
    | 654 | 'l' not in wctx.flags(f) and haskwsource(f)]
|
655 | 655 | kwt.overwrite(wctx, candidates, False, False) |
|
656 | 656 | |
|
657 | 657 | def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts): |
|
658 | 658 | '''Wraps record.dorecord expanding keywords after recording.''' |
|
659 | 659 | wlock = repo.wlock() |
|
660 | 660 | try: |
|
661 | 661 | # record returns 0 even when nothing has changed |
|
662 | 662 | # therefore compare nodes before and after |
|
663 | 663 | kwt.record = True |
|
664 | 664 | ctx = repo['.'] |
|
665 | 665 | wstatus = repo[None].status() |
|
666 | 666 | ret = orig(ui, repo, commitfunc, *pats, **opts) |
|
667 | 667 | recctx = repo['.'] |
|
668 | 668 | if ctx != recctx: |
|
669 | 669 | modified, added = _preselect(wstatus, recctx.files()) |
|
670 | 670 | kwt.restrict = False |
|
671 | 671 | kwt.overwrite(recctx, modified, False, True) |
|
672 | 672 | kwt.overwrite(recctx, added, False, True, True) |
|
673 | 673 | kwt.restrict = True |
|
674 | 674 | return ret |
|
675 | 675 | finally: |
|
676 | 676 | wlock.release() |
|
677 | 677 | |
|
678 | 678 | def kwfilectx_cmp(orig, self, fctx): |
|
679 | 679 | # keyword expansion affects data size; comparing wdir and filelog

680 | 680 | # sizes does not make sense
|
681 | 681 | if (fctx._filerev is None and |
|
682 | 682 | (self._repo._encodefilterpats or |
|
683 |     | kwt.match(fctx.path()) and not 'l' in fctx.flags() or
    | 683 | kwt.match(fctx.path()) and 'l' not in fctx.flags() or
|
684 | 684 | self.size() - 4 == fctx.size()) or |
|
685 | 685 | self.size() == fctx.size()): |
|
686 | 686 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
687 | 687 | return True |
|
688 | 688 | |
|
689 | 689 | extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp) |
|
690 | 690 | extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init) |
|
691 | 691 | extensions.wrapfunction(patch, 'diff', kw_diff) |
|
692 | 692 | extensions.wrapfunction(cmdutil, 'copy', kw_copy) |
|
693 | 693 | for c in 'annotate changeset rev filediff diff'.split(): |
|
694 | 694 | extensions.wrapfunction(webcommands, c, kwweb_skip) |
|
695 | 695 | for name in recordextensions.split(): |
|
696 | 696 | try: |
|
697 | 697 | record = extensions.find(name) |
|
698 | 698 | extensions.wrapfunction(record, 'dorecord', kw_dorecord) |
|
699 | 699 | except KeyError: |
|
700 | 700 | pass |
|
701 | 701 | |
|
702 | 702 | repo.__class__ = kwrepo |
@@ -1,29 +1,29 b''
|
1 | 1 | # Copyright 2010-2011 Fog Creek Software |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | '''largefile store working over Mercurial's wire protocol''' |
|
7 | 7 | |
|
8 | 8 | import lfutil |
|
9 | 9 | import remotestore |
|
10 | 10 | |
|
11 | 11 | class wirestore(remotestore.remotestore): |
|
12 | 12 | def __init__(self, ui, repo, remote): |
|
13 | 13 | cap = remote.capable('largefiles') |
|
14 | 14 | if not cap: |
|
15 | 15 | raise lfutil.storeprotonotcapable([]) |
|
16 | 16 | storetypes = cap.split(',') |
|
17 |    | if not 'serve' in storetypes:
   | 17 | if 'serve' not in storetypes:
|
18 | 18 | raise lfutil.storeprotonotcapable(storetypes) |
|
19 | 19 | self.remote = remote |
|
20 | 20 | super(wirestore, self).__init__(ui, repo, remote.url()) |
|
21 | 21 | |
|
22 | 22 | def _put(self, hash, fd): |
|
23 | 23 | return self.remote.putlfile(hash, fd) |
|
24 | 24 | |
|
25 | 25 | def _get(self, hash): |
|
26 | 26 | return self.remote.getlfile(hash) |
|
27 | 27 | |
|
28 | 28 | def _stat(self, hash): |
|
29 | 29 | return self.remote.statlfile(hash) |
@@ -1,184 +1,184 b''
|
1 | 1 | # Mercurial extension to provide 'hg relink' command |
|
2 | 2 | # |
|
3 | 3 | # Copyright (C) 2007 Brendan Cully <brendan@kublai.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """recreates hardlinks between repository clones""" |
|
9 | 9 | |
|
10 | 10 | from mercurial import hg, util |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | import os, stat |
|
13 | 13 | |
|
14 | 14 | def relink(ui, repo, origin=None, **opts): |
|
15 | 15 | """recreate hardlinks between two repositories |
|
16 | 16 | |
|
17 | 17 | When repositories are cloned locally, their data files will be |
|
18 | 18 | hardlinked so that they only use the space of a single repository. |
|
19 | 19 | |
|
20 | 20 | Unfortunately, subsequent pulls into either repository will break |
|
21 | 21 | hardlinks for any files touched by the new changesets, even if |
|
22 | 22 | both repositories end up pulling the same changes. |
|
23 | 23 | |
|
24 | 24 | Similarly, passing --rev to "hg clone" will fail to use any |
|
25 | 25 | hardlinks, falling back to a complete copy of the source |
|
26 | 26 | repository. |
|
27 | 27 | |
|
28 | 28 | This command lets you recreate those hardlinks and reclaim that |
|
29 | 29 | wasted space. |
|
30 | 30 | |
|
31 | 31 | This repository will be relinked to share space with ORIGIN, which |
|
32 | 32 | must be on the same local disk. If ORIGIN is omitted, looks for |
|
33 | 33 | "default-relink", then "default", in [paths]. |
|
34 | 34 | |
|
35 | 35 | Do not attempt any read operations on this repository while the |
|
36 | 36 | command is running. (Both repositories will be locked against |
|
37 | 37 | writes.) |
|
38 | 38 | """ |
|
39 | 39 | if (not util.safehasattr(util, 'samefile') or |
|
40 | 40 | not util.safehasattr(util, 'samedevice')): |
|
41 | 41 | raise util.Abort(_('hardlinks are not supported on this system')) |
|
42 | 42 | src = hg.repository(ui, ui.expandpath(origin or 'default-relink', |
|
43 | 43 | origin or 'default')) |
|
44 | 44 | if not src.local(): |
|
45 | 45 | raise util.Abort(_('must specify local origin repository')) |
|
46 | 46 | ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path)) |
|
47 | 47 | if repo.root == src.root: |
|
48 | 48 | ui.status(_('there is nothing to relink\n')) |
|
49 | 49 | return |
|
50 | 50 | |
|
51 | 51 | locallock = repo.lock() |
|
52 | 52 | try: |
|
53 | 53 | remotelock = src.lock() |
|
54 | 54 | try: |
|
55 | 55 | candidates = sorted(collect(src, ui)) |
|
56 | 56 | targets = prune(candidates, src.store.path, repo.store.path, ui) |
|
57 | 57 | do_relink(src.store.path, repo.store.path, targets, ui) |
|
58 | 58 | finally: |
|
59 | 59 | remotelock.release() |
|
60 | 60 | finally: |
|
61 | 61 | locallock.release() |
|
62 | 62 | |
|
63 | 63 | def collect(src, ui): |
|
64 | 64 | seplen = len(os.path.sep) |
|
65 | 65 | candidates = [] |
|
66 | 66 | live = len(src['tip'].manifest()) |
|
67 | 67 | # Your average repository has some files which were deleted before |
|
68 | 68 | # the tip revision. We account for that by assuming that there are |
|
69 | 69 | # 3 tracked files for every 2 live files as of the tip version of |
|
70 | 70 | # the repository. |
|
71 | 71 | # |
|
72 | 72 | # mozilla-central as of 2010-06-10 had a ratio of just over 7:5. |
|
73 | 73 | total = live * 3 // 2 |
|
74 | 74 | src = src.store.path |
|
75 | 75 | pos = 0 |
|
76 | 76 | ui.status(_("tip has %d files, estimated total number of files: %s\n") |
|
77 | 77 | % (live, total)) |
|
78 | 78 | for dirpath, dirnames, filenames in os.walk(src): |
|
79 | 79 | dirnames.sort() |
|
80 | 80 | relpath = dirpath[len(src) + seplen:] |
|
81 | 81 | for filename in sorted(filenames): |
|
82 |    | if not filename[-2:] in ('.d', '.i'):
   | 82 | if filename[-2:] not in ('.d', '.i'):
|
83 | 83 | continue |
|
84 | 84 | st = os.stat(os.path.join(dirpath, filename)) |
|
85 | 85 | if not stat.S_ISREG(st.st_mode): |
|
86 | 86 | continue |
|
87 | 87 | pos += 1 |
|
88 | 88 | candidates.append((os.path.join(relpath, filename), st)) |
|
89 | 89 | ui.progress(_('collecting'), pos, filename, _('files'), total) |
|
90 | 90 | |
|
91 | 91 | ui.progress(_('collecting'), None) |
|
92 | 92 | ui.status(_('collected %d candidate storage files\n') % len(candidates)) |
|
93 | 93 | return candidates |
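A worked instance of the 3:2 estimate used above: with 70,000 files in the tip manifest, the progress total assumes ~105,000 storage files; mozilla-central's observed 7:5 ratio would predict 98,000, so the guess deliberately errs high.

    live = 70000
    total = live * 3 // 2   # 105000, the progress-bar ceiling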
|
94 | 94 | |
|
95 | 95 | def prune(candidates, src, dst, ui): |
|
96 | 96 | def linkfilter(src, dst, st): |
|
97 | 97 | try: |
|
98 | 98 | ts = os.stat(dst) |
|
99 | 99 | except OSError: |
|
100 | 100 | # Destination doesn't have this file? |
|
101 | 101 | return False |
|
102 | 102 | if util.samefile(src, dst): |
|
103 | 103 | return False |
|
104 | 104 | if not util.samedevice(src, dst): |
|
105 | 105 | # No point in continuing |
|
106 | 106 | raise util.Abort( |
|
107 | 107 | _('source and destination are on different devices')) |
|
108 | 108 | if st.st_size != ts.st_size: |
|
109 | 109 | return False |
|
110 | 110 | return st |
|
111 | 111 | |
|
112 | 112 | targets = [] |
|
113 | 113 | total = len(candidates) |
|
114 | 114 | pos = 0 |
|
115 | 115 | for fn, st in candidates: |
|
116 | 116 | pos += 1 |
|
117 | 117 | srcpath = os.path.join(src, fn) |
|
118 | 118 | tgt = os.path.join(dst, fn) |
|
119 | 119 | ts = linkfilter(srcpath, tgt, st) |
|
120 | 120 | if not ts: |
|
121 | 121 | ui.debug('not linkable: %s\n' % fn) |
|
122 | 122 | continue |
|
123 | 123 | targets.append((fn, ts.st_size)) |
|
124 | 124 | ui.progress(_('pruning'), pos, fn, _('files'), total) |
|
125 | 125 | |
|
126 | 126 | ui.progress(_('pruning'), None) |
|
127 | 127 | ui.status(_('pruned down to %d probably relinkable files\n') % len(targets)) |
|
128 | 128 | return targets |
|
129 | 129 | |
|
130 | 130 | def do_relink(src, dst, files, ui): |
|
131 | 131 | def relinkfile(src, dst): |
|
132 | 132 | bak = dst + '.bak' |
|
133 | 133 | os.rename(dst, bak) |
|
134 | 134 | try: |
|
135 | 135 | util.oslink(src, dst) |
|
136 | 136 | except OSError: |
|
137 | 137 | os.rename(bak, dst) |
|
138 | 138 | raise |
|
139 | 139 | os.remove(bak) |
|
140 | 140 | |
|
141 | 141 | CHUNKLEN = 65536 |
|
142 | 142 | relinked = 0 |
|
143 | 143 | savedbytes = 0 |
|
144 | 144 | |
|
145 | 145 | pos = 0 |
|
146 | 146 | total = len(files) |
|
147 | 147 | for f, sz in files: |
|
148 | 148 | pos += 1 |
|
149 | 149 | source = os.path.join(src, f) |
|
150 | 150 | tgt = os.path.join(dst, f) |
|
151 | 151 | # Binary mode, so that read() works correctly, especially on Windows |
|
152 | 152 | sfp = file(source, 'rb') |
|
153 | 153 | dfp = file(tgt, 'rb') |
|
154 | 154 | sin = sfp.read(CHUNKLEN) |
|
155 | 155 | while sin: |
|
156 | 156 | din = dfp.read(CHUNKLEN) |
|
157 | 157 | if sin != din: |
|
158 | 158 | break |
|
159 | 159 | sin = sfp.read(CHUNKLEN) |
|
160 | 160 | sfp.close() |
|
161 | 161 | dfp.close() |
|
162 | 162 | if sin: |
|
163 | 163 | ui.debug('not linkable: %s\n' % f) |
|
164 | 164 | continue |
|
165 | 165 | try: |
|
166 | 166 | relinkfile(source, tgt) |
|
167 | 167 | ui.progress(_('relinking'), pos, f, _('files'), total) |
|
168 | 168 | relinked += 1 |
|
169 | 169 | savedbytes += sz |
|
170 | 170 | except OSError, inst: |
|
171 | 171 | ui.warn('%s: %s\n' % (tgt, str(inst))) |
|
172 | 172 | |
|
173 | 173 | ui.progress(_('relinking'), None) |
|
174 | 174 | |
|
175 | 175 | ui.status(_('relinked %d files (%s reclaimed)\n') % |
|
176 | 176 | (relinked, util.bytecount(savedbytes))) |
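The inline chunkwise comparison above, factored into a standalone helper for clarity (generic Python, not part of relink; equal sizes are assumed, as prune() already guarantees):

    def samecontent(src, dst, chunklen=65536):
        # stream both files and bail out at the first differing chunk
        sfp = open(src, 'rb')
        dfp = open(dst, 'rb')
        try:
            while True:
                sin = sfp.read(chunklen)
                if sin != dfp.read(chunklen):
                    return False
                if not sin:
                    return True
        finally:
            sfp.close()
            dfp.close()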
|
177 | 177 | |
|
178 | 178 | cmdtable = { |
|
179 | 179 | 'relink': ( |
|
180 | 180 | relink, |
|
181 | 181 | [], |
|
182 | 182 | _('[ORIGIN]') |
|
183 | 183 | ) |
|
184 | 184 | } |
@@ -1,380 +1,380 b''
|
1 | 1 | # bundlerepo.py - repository class for viewing uncompressed bundles |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """Repository class for viewing uncompressed bundles. |
|
9 | 9 | |
|
10 | 10 | This provides a read-only repository interface to bundles as if they |
|
11 | 11 | were part of the actual repository. |
|
12 | 12 | """ |
|
13 | 13 | |
|
14 | 14 | from node import nullid |
|
15 | 15 | from i18n import _ |
|
16 | 16 | import os, tempfile, shutil |
|
17 | 17 | import changegroup, util, mdiff, discovery, cmdutil |
|
18 | 18 | import localrepo, changelog, manifest, filelog, revlog, error |
|
19 | 19 | |
|
20 | 20 | class bundlerevlog(revlog.revlog): |
|
21 | 21 | def __init__(self, opener, indexfile, bundle, linkmapper): |
|
22 | 22 | # How it works: |
|
23 | 23 | # to retrieve a revision, we need to know the offset of |
|
24 | 24 | # the revision in the bundle (an unbundle object). |
|
25 | 25 | # |
|
26 | 26 | # We store this offset in the index (start). To differentiate a

27 | 27 | # rev in the bundle from a rev in the revlog, we check

28 | 28 | # len(index[r]): if the tuple is bigger than 7, it is a bundle

29 | 29 | # (it is bigger since we also store the node the delta is against)
|
30 | 30 | # |
|
31 | 31 | revlog.revlog.__init__(self, opener, indexfile) |
|
32 | 32 | self.bundle = bundle |
|
33 | 33 | self.basemap = {} |
|
34 | 34 | n = len(self) |
|
35 | 35 | chain = None |
|
36 | 36 | while True: |
|
37 | 37 | chunkdata = bundle.deltachunk(chain) |
|
38 | 38 | if not chunkdata: |
|
39 | 39 | break |
|
40 | 40 | node = chunkdata['node'] |
|
41 | 41 | p1 = chunkdata['p1'] |
|
42 | 42 | p2 = chunkdata['p2'] |
|
43 | 43 | cs = chunkdata['cs'] |
|
44 | 44 | deltabase = chunkdata['deltabase'] |
|
45 | 45 | delta = chunkdata['delta'] |
|
46 | 46 | |
|
47 | 47 | size = len(delta) |
|
48 | 48 | start = bundle.tell() - size |
|
49 | 49 | |
|
50 | 50 | link = linkmapper(cs) |
|
51 | 51 | if node in self.nodemap: |
|
52 | 52 | # this can happen if two branches make the same change |
|
53 | 53 | chain = node |
|
54 | 54 | continue |
|
55 | 55 | |
|
56 | 56 | for p in (p1, p2): |
|
57 |    | if not p in self.nodemap:
   | 57 | if p not in self.nodemap:
|
58 | 58 | raise error.LookupError(p, self.indexfile, |
|
59 | 59 | _("unknown parent")) |
|
60 | 60 | # start, size, full unc. size, base (unused), link, p1, p2, node |
|
61 | 61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, |
|
62 | 62 | self.rev(p1), self.rev(p2), node) |
|
63 | 63 | self.basemap[n] = deltabase |
|
64 | 64 | self.index.insert(-1, e) |
|
65 | 65 | self.nodemap[node] = n |
|
66 | 66 | chain = node |
|
67 | 67 | n += 1 |
|
68 | 68 | |
|
69 | 69 | def inbundle(self, rev): |
|
70 | 70 | """is rev from the bundle""" |
|
71 | 71 | if rev < 0: |
|
72 | 72 | return False |
|
73 | 73 | return rev in self.basemap |
|
74 | 74 | def bundlebase(self, rev): |
|
75 | 75 | return self.basemap[rev] |
|
76 | 76 | def _chunk(self, rev): |
|
77 | 77 | # Warning: in case of bundle, the diff is against bundlebase, |
|
78 | 78 | # not against rev - 1 |
|
79 | 79 | # XXX: could use some caching |
|
80 | 80 | if not self.inbundle(rev): |
|
81 | 81 | return revlog.revlog._chunk(self, rev) |
|
82 | 82 | self.bundle.seek(self.start(rev)) |
|
83 | 83 | return self.bundle.read(self.length(rev)) |
|
84 | 84 | |
|
85 | 85 | def revdiff(self, rev1, rev2): |
|
86 | 86 | """return or calculate a delta between two revisions""" |
|
87 | 87 | if self.inbundle(rev1) and self.inbundle(rev2): |
|
88 | 88 | # hot path for bundle |
|
89 | 89 | revb = self.rev(self.bundlebase(rev2)) |
|
90 | 90 | if revb == rev1: |
|
91 | 91 | return self._chunk(rev2) |
|
92 | 92 | elif not self.inbundle(rev1) and not self.inbundle(rev2): |
|
93 | 93 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
94 | 94 | |
|
95 | 95 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
96 | 96 | self.revision(self.node(rev2))) |
|
97 | 97 | |
|
98 | 98 | def revision(self, nodeorrev): |
|
99 | 99 | """return an uncompressed revision of a given node or revision |
|
100 | 100 | number. |
|
101 | 101 | """ |
|
102 | 102 | if isinstance(nodeorrev, int): |
|
103 | 103 | rev = nodeorrev |
|
104 | 104 | node = self.node(rev) |
|
105 | 105 | else: |
|
106 | 106 | node = nodeorrev |
|
107 | 107 | rev = self.rev(node) |
|
108 | 108 | |
|
109 | 109 | if node == nullid: |
|
110 | 110 | return "" |
|
111 | 111 | |
|
112 | 112 | text = None |
|
113 | 113 | chain = [] |
|
114 | 114 | iter_node = node |
|
115 | 115 | # reconstruct the revision if it is from a changegroup |
|
116 | 116 | while self.inbundle(rev): |
|
117 | 117 | if self._cache and self._cache[0] == iter_node: |
|
118 | 118 | text = self._cache[2] |
|
119 | 119 | break |
|
120 | 120 | chain.append(rev) |
|
121 | 121 | iter_node = self.bundlebase(rev) |
|
122 | 122 | rev = self.rev(iter_node) |
|
123 | 123 | if text is None: |
|
124 | 124 | text = revlog.revlog.revision(self, iter_node) |
|
125 | 125 | |
|
126 | 126 | while chain: |
|
127 | 127 | delta = self._chunk(chain.pop()) |
|
128 | 128 | text = mdiff.patches(text, [delta]) |
|
129 | 129 | |
|
130 | 130 | p1, p2 = self.parents(node) |
|
131 | 131 | if node != revlog.hash(text, p1, p2): |
|
132 | 132 | raise error.RevlogError(_("integrity check failed on %s:%d") |
|
133 | 133 | % (self.datafile, self.rev(node))) |
|
134 | 134 | |
|
135 | 135 | self._cache = (node, self.rev(node), text) |
|
136 | 136 | return text |
|
137 | 137 | |
|
138 | 138 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
|
139 | 139 | raise NotImplementedError |
|
140 | 140 | def addgroup(self, revs, linkmapper, transaction): |
|
141 | 141 | raise NotImplementedError |
|
142 | 142 | def strip(self, rev, minlink): |
|
143 | 143 | raise NotImplementedError |
|
144 | 144 | def checksize(self): |
|
145 | 145 | raise NotImplementedError |
|
146 | 146 | |
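The delta-chain walk in revision() above, reduced to its shape (sketch only; baserevision and applydelta are hypothetical stand-ins for the revlog full-text read and mdiff.patches):

    def reconstruct(rlog, rev):
        # walk bundle revs down to the first on-disk (or cached) base ...
        chain = []
        while rlog.inbundle(rev):
            chain.append(rev)
            rev = rlog.rev(rlog.bundlebase(rev))
        text = baserevision(rlog, rev)   # hypothetical full-text read
        # ... then replay the stored deltas, oldest base first
        while chain:
            text = applydelta(text, rlog._chunk(chain.pop()))  # hypothetical
        return text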
|
147 | 147 | class bundlechangelog(bundlerevlog, changelog.changelog): |
|
148 | 148 | def __init__(self, opener, bundle): |
|
149 | 149 | changelog.changelog.__init__(self, opener) |
|
150 | 150 | linkmapper = lambda x: x |
|
151 | 151 | bundlerevlog.__init__(self, opener, self.indexfile, bundle, |
|
152 | 152 | linkmapper) |
|
153 | 153 | |
|
154 | 154 | class bundlemanifest(bundlerevlog, manifest.manifest): |
|
155 | 155 | def __init__(self, opener, bundle, linkmapper): |
|
156 | 156 | manifest.manifest.__init__(self, opener) |
|
157 | 157 | bundlerevlog.__init__(self, opener, self.indexfile, bundle, |
|
158 | 158 | linkmapper) |
|
159 | 159 | |
|
160 | 160 | class bundlefilelog(bundlerevlog, filelog.filelog): |
|
161 | 161 | def __init__(self, opener, path, bundle, linkmapper, repo): |
|
162 | 162 | filelog.filelog.__init__(self, opener, path) |
|
163 | 163 | bundlerevlog.__init__(self, opener, self.indexfile, bundle, |
|
164 | 164 | linkmapper) |
|
165 | 165 | self._repo = repo |
|
166 | 166 | |
|
167 | 167 | def _file(self, f): |
|
168 | 168 | self._repo.file(f) |
|
169 | 169 | |
|
170 | 170 | class bundlerepository(localrepo.localrepository): |
|
171 | 171 | def __init__(self, ui, path, bundlename): |
|
172 | 172 | self._tempparent = None |
|
173 | 173 | try: |
|
174 | 174 | localrepo.localrepository.__init__(self, ui, path) |
|
175 | 175 | except error.RepoError: |
|
176 | 176 | self._tempparent = tempfile.mkdtemp() |
|
177 | 177 | localrepo.instance(ui, self._tempparent, 1) |
|
178 | 178 | localrepo.localrepository.__init__(self, ui, self._tempparent) |
|
179 | 179 | self.ui.setconfig('phases', 'publish', False) |
|
180 | 180 | |
|
181 | 181 | if path: |
|
182 | 182 | self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename |
|
183 | 183 | else: |
|
184 | 184 | self._url = 'bundle:' + bundlename |
|
185 | 185 | |
|
186 | 186 | self.tempfile = None |
|
187 | 187 | f = util.posixfile(bundlename, "rb") |
|
188 | 188 | self.bundle = changegroup.readbundle(f, bundlename) |
|
189 | 189 | if self.bundle.compressed(): |
|
190 | 190 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", |
|
191 | 191 | suffix=".hg10un", dir=self.path) |
|
192 | 192 | self.tempfile = temp |
|
193 | 193 | fptemp = os.fdopen(fdtemp, 'wb') |
|
194 | 194 | |
|
195 | 195 | try: |
|
196 | 196 | fptemp.write("HG10UN") |
|
197 | 197 | while True: |
|
198 | 198 | chunk = self.bundle.read(2**18) |
|
199 | 199 | if not chunk: |
|
200 | 200 | break |
|
201 | 201 | fptemp.write(chunk) |
|
202 | 202 | finally: |
|
203 | 203 | fptemp.close() |
|
204 | 204 | |
|
205 | 205 | f = util.posixfile(self.tempfile, "rb") |
|
206 | 206 | self.bundle = changegroup.readbundle(f, bundlename) |
|
207 | 207 | |
|
208 | 208 | # dict with the mapping 'filename' -> position in the bundle |
|
209 | 209 | self.bundlefilespos = {} |
|
210 | 210 | |
|
211 | 211 | @util.propertycache |
|
212 | 212 | def changelog(self): |
|
213 | 213 | # consume the header if it exists |
|
214 | 214 | self.bundle.changelogheader() |
|
215 | 215 | c = bundlechangelog(self.sopener, self.bundle) |
|
216 | 216 | self.manstart = self.bundle.tell() |
|
217 | 217 | return c |
|
218 | 218 | |
|
219 | 219 | @util.propertycache |
|
220 | 220 | def manifest(self): |
|
221 | 221 | self.bundle.seek(self.manstart) |
|
222 | 222 | # consume the header if it exists |
|
223 | 223 | self.bundle.manifestheader() |
|
224 | 224 | m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev) |
|
225 | 225 | self.filestart = self.bundle.tell() |
|
226 | 226 | return m |
|
227 | 227 | |
|
228 | 228 | @util.propertycache |
|
229 | 229 | def manstart(self): |
|
230 | 230 | self.changelog |
|
231 | 231 | return self.manstart |
|
232 | 232 | |
|
233 | 233 | @util.propertycache |
|
234 | 234 | def filestart(self): |
|
235 | 235 | self.manifest |
|
236 | 236 | return self.filestart |
|
237 | 237 | |
|
238 | 238 | def url(self): |
|
239 | 239 | return self._url |
|
240 | 240 | |
|
241 | 241 | def file(self, f): |
|
242 | 242 | if not self.bundlefilespos: |
|
243 | 243 | self.bundle.seek(self.filestart) |
|
244 | 244 | while True: |
|
245 | 245 | chunkdata = self.bundle.filelogheader() |
|
246 | 246 | if not chunkdata: |
|
247 | 247 | break |
|
248 | 248 | fname = chunkdata['filename'] |
|
249 | 249 | self.bundlefilespos[fname] = self.bundle.tell() |
|
250 | 250 | while True: |
|
251 | 251 | c = self.bundle.deltachunk(None) |
|
252 | 252 | if not c: |
|
253 | 253 | break |
|
254 | 254 | |
|
255 | 255 | if f[0] == '/': |
|
256 | 256 | f = f[1:] |
|
257 | 257 | if f in self.bundlefilespos: |
|
258 | 258 | self.bundle.seek(self.bundlefilespos[f]) |
|
259 | 259 | return bundlefilelog(self.sopener, f, self.bundle, |
|
260 | 260 | self.changelog.rev, self) |
|
261 | 261 | else: |
|
262 | 262 | return filelog.filelog(self.sopener, f) |
|
263 | 263 | |
|
264 | 264 | def close(self): |
|
265 | 265 | """Close assigned bundle file immediately.""" |
|
266 | 266 | self.bundle.close() |
|
267 | 267 | if self.tempfile is not None: |
|
268 | 268 | os.unlink(self.tempfile) |
|
269 | 269 | if self._tempparent: |
|
270 | 270 | shutil.rmtree(self._tempparent, True) |
|
271 | 271 | |
|
272 | 272 | def cancopy(self): |
|
273 | 273 | return False |
|
274 | 274 | |
|
275 | 275 | def getcwd(self): |
|
276 | 276 | return os.getcwd() # always outside the repo |
|
277 | 277 | |
|
278 | 278 | def _writebranchcache(self, branches, tip, tiprev): |
|
279 | 279 | # don't overwrite the disk cache with bundle-augmented data |
|
280 | 280 | pass |
|
281 | 281 | |
|
282 | 282 | def instance(ui, path, create): |
|
283 | 283 | if create: |
|
284 | 284 | raise util.Abort(_('cannot create new bundle repository')) |
|
285 | 285 | parentpath = ui.config("bundle", "mainreporoot", "") |
|
286 | 286 | if not parentpath: |
|
287 | 287 | # try to find the correct path to the working directory repo |
|
288 | 288 | parentpath = cmdutil.findrepo(os.getcwd()) |
|
289 | 289 | if parentpath is None: |
|
290 | 290 | parentpath = '' |
|
291 | 291 | if parentpath: |
|
292 | 292 | # Try to make the full path relative so we get a nice, short URL. |
|
293 | 293 | # In particular, we don't want temp dir names in test outputs. |
|
294 | 294 | cwd = os.getcwd() |
|
295 | 295 | if parentpath == cwd: |
|
296 | 296 | parentpath = '' |
|
297 | 297 | else: |
|
298 | 298 | cwd = os.path.join(cwd,'') |
|
299 | 299 | if parentpath.startswith(cwd): |
|
300 | 300 | parentpath = parentpath[len(cwd):] |
|
301 | 301 | u = util.url(path) |
|
302 | 302 | path = u.localpath() |
|
303 | 303 | if u.scheme == 'bundle': |
|
304 | 304 | s = path.split("+", 1) |
|
305 | 305 | if len(s) == 1: |
|
306 | 306 | repopath, bundlename = parentpath, s[0] |
|
307 | 307 | else: |
|
308 | 308 | repopath, bundlename = s |
|
309 | 309 | else: |
|
310 | 310 | repopath, bundlename = parentpath, path |
|
311 | 311 | return bundlerepository(ui, repopath, bundlename) |
|
312 | 312 | |
|
313 | 313 | def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None, |
|
314 | 314 | force=False): |
|
315 | 315 | '''obtains a bundle of changes incoming from other |
|
316 | 316 | |
|
317 | 317 | "onlyheads" restricts the returned changes to those reachable from the |
|
318 | 318 | specified heads. |
|
319 | 319 | "bundlename", if given, stores the bundle to this file path permanently; |
|
320 | 320 | otherwise it's stored to a temp file and gets deleted again when you call |
|
321 | 321 | the returned "cleanupfn". |
|
322 | 322 | "force" indicates whether to proceed on unrelated repos. |
|
323 | 323 | |
|
324 | 324 | Returns a tuple (local, csets, cleanupfn): |
|
325 | 325 | |
|
326 | 326 | "local" is a local repo from which to obtain the actual incoming |
|
327 | 327 | changesets; it is a bundlerepo for the obtained bundle when the |
|
328 | 328 | original "other" is remote. |
|
329 | 329 | "csets" lists the incoming changeset node ids. |
|
330 | 330 | "cleanupfn" must be called without arguments when you're done processing |
|
331 | 331 | the changes; it closes both the original "other" and the one returned |
|
332 | 332 | here. |
|
333 | 333 | ''' |
|
334 | 334 | tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, |
|
335 | 335 | force=force) |
|
336 | 336 | common, incoming, rheads = tmp |
|
337 | 337 | if not incoming: |
|
338 | 338 | try: |
|
339 | 339 | if bundlename: |
|
340 | 340 | os.unlink(bundlename) |
|
341 | 341 | except OSError: |
|
342 | 342 | pass |
|
343 | 343 | return other, [], other.close |
|
344 | 344 | |
|
345 | 345 | bundle = None |
|
346 | 346 | bundlerepo = None |
|
347 | 347 | localrepo = other |
|
348 | 348 | if bundlename or not other.local(): |
|
349 | 349 | # create a bundle (uncompressed if other repo is not local) |
|
350 | 350 | |
|
351 | 351 | if other.capable('getbundle'): |
|
352 | 352 | cg = other.getbundle('incoming', common=common, heads=rheads) |
|
353 | 353 | elif onlyheads is None and not other.capable('changegroupsubset'): |
|
354 | 354 | # compat with older servers when pulling all remote heads |
|
355 | 355 | cg = other.changegroup(incoming, "incoming") |
|
356 | 356 | rheads = None |
|
357 | 357 | else: |
|
358 | 358 | cg = other.changegroupsubset(incoming, rheads, 'incoming') |
|
359 | 359 | bundletype = other.local() and "HG10BZ" or "HG10UN" |
|
360 | 360 | fname = bundle = changegroup.writebundle(cg, bundlename, bundletype) |
|
361 | 361 | # keep written bundle? |
|
362 | 362 | if bundlename: |
|
363 | 363 | bundle = None |
|
364 | 364 | if not other.local(): |
|
365 | 365 | # use the created uncompressed bundlerepo |
|
366 | 366 | localrepo = bundlerepo = bundlerepository(ui, repo.root, fname) |
|
367 | 367 | # this repo contains local and other now, so filter out local again |
|
368 | 368 | common = repo.heads() |
|
369 | 369 | |
|
370 | 370 | csets = localrepo.changelog.findmissing(common, rheads) |
|
371 | 371 | |
|
372 | 372 | def cleanup(): |
|
373 | 373 | if bundlerepo: |
|
374 | 374 | bundlerepo.close() |
|
375 | 375 | if bundle: |
|
376 | 376 | os.unlink(bundle) |
|
377 | 377 | other.close() |
|
378 | 378 | |
|
379 | 379 | return (localrepo, csets, cleanup) |
|
380 | 380 |
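A hedged usage sketch of the contract described in the docstring; the peer construction is assumed for illustration and is not taken from this changeset:

    other = hg.peer(ui, {}, 'http://example.com/repo')   # assumed helper
    local, csets, cleanupfn = getremotechanges(ui, repo, other)
    try:
        for node in csets:
            ui.write('%s\n' % local[node].description())
    finally:
        cleanupfn()   # closes 'other' and any temporary bundlerepo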
@@ -1,796 +1,796 b''
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | import errno |
|
8 | 8 | |
|
9 | 9 | from node import nullid |
|
10 | 10 | from i18n import _ |
|
11 | 11 | import scmutil, util, ignore, osutil, parsers, encoding |
|
12 | 12 | import struct, os, stat, errno |
|
13 | 13 | import cStringIO |
|
14 | 14 | |
|
15 | 15 | _format = ">cllll" |
|
16 | 16 | propertycache = util.propertycache |
|
17 | 17 | filecache = scmutil.filecache |
|
18 | 18 | |
|
19 | 19 | class repocache(filecache): |
|
20 | 20 | """filecache for files in .hg/""" |
|
21 | 21 | def join(self, obj, fname): |
|
22 | 22 | return obj._opener.join(fname) |
|
23 | 23 | |
|
24 | 24 | class rootcache(filecache): |
|
25 | 25 | """filecache for files in the repository root""" |
|
26 | 26 | def join(self, obj, fname): |
|
27 | 27 | return obj._join(fname) |
|
28 | 28 | |
|
29 | 29 | def _finddirs(path): |
|
30 | 30 | pos = path.rfind('/') |
|
31 | 31 | while pos != -1: |
|
32 | 32 | yield path[:pos] |
|
33 | 33 | pos = path.rfind('/', 0, pos) |
|
34 | 34 | |
|
35 | 35 | def _incdirs(dirs, path): |
|
36 | 36 | for base in _finddirs(path): |
|
37 | 37 | if base in dirs: |
|
38 | 38 | dirs[base] += 1 |
|
39 | 39 | return |
|
40 | 40 | dirs[base] = 1 |
|
41 | 41 | |
|
42 | 42 | def _decdirs(dirs, path): |
|
43 | 43 | for base in _finddirs(path): |
|
44 | 44 | if dirs[base] > 1: |
|
45 | 45 | dirs[base] -= 1 |
|
46 | 46 | return |
|
47 | 47 | del dirs[base] |
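Concretely, _finddirs('a/b/c.txt') yields 'a/b' then 'a', and the inc/dec pair maintains a per-directory count that stops at the first already-known ancestor:

    dirs = {}
    _incdirs(dirs, 'a/b/c.txt')   # counts 'a/b' and 'a'
    _incdirs(dirs, 'a/b/d.txt')   # 'a/b' already known: bump it and stop
    assert dirs == {'a/b': 2, 'a': 1}
    _decdirs(dirs, 'a/b/d.txt')
    assert dirs == {'a/b': 1, 'a': 1}
    _decdirs(dirs, 'a/b/c.txt')
    assert dirs == {}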
|
48 | 48 | |
|
49 | 49 | class dirstate(object): |
|
50 | 50 | |
|
51 | 51 | def __init__(self, opener, ui, root, validate): |
|
52 | 52 | '''Create a new dirstate object. |
|
53 | 53 | |
|
54 | 54 | opener is an open()-like callable that can be used to open the |
|
55 | 55 | dirstate file; root is the root of the directory tracked by |
|
56 | 56 | the dirstate. |
|
57 | 57 | ''' |
|
58 | 58 | self._opener = opener |
|
59 | 59 | self._validate = validate |
|
60 | 60 | self._root = root |
|
61 | 61 | self._rootdir = os.path.join(root, '') |
|
62 | 62 | self._dirty = False |
|
63 | 63 | self._dirtypl = False |
|
64 | 64 | self._lastnormaltime = 0 |
|
65 | 65 | self._ui = ui |
|
66 | 66 | self._filecache = {} |
|
67 | 67 | |
|
68 | 68 | @propertycache |
|
69 | 69 | def _map(self): |
|
70 | 70 | '''Return the dirstate contents as a map from filename to |
|
71 | 71 | (state, mode, size, time).''' |
|
72 | 72 | self._read() |
|
73 | 73 | return self._map |
|
74 | 74 | |
|
75 | 75 | @propertycache |
|
76 | 76 | def _copymap(self): |
|
77 | 77 | self._read() |
|
78 | 78 | return self._copymap |
|
79 | 79 | |
|
80 | 80 | @propertycache |
|
81 | 81 | def _foldmap(self): |
|
82 | 82 | f = {} |
|
83 | 83 | for name in self._map: |
|
84 | 84 | f[util.normcase(name)] = name |
|
85 | 85 | for name in self._dirs: |
|
86 | 86 | f[util.normcase(name)] = name |
|
87 | 87 | f['.'] = '.' # prevents useless util.fspath() invocation |
|
88 | 88 | return f |
|
89 | 89 | |
|
90 | 90 | @repocache('branch') |
|
91 | 91 | def _branch(self): |
|
92 | 92 | try: |
|
93 | 93 | return self._opener.read("branch").strip() or "default" |
|
94 | 94 | except IOError, inst: |
|
95 | 95 | if inst.errno != errno.ENOENT: |
|
96 | 96 | raise |
|
97 | 97 | return "default" |
|
98 | 98 | |
|
99 | 99 | @propertycache |
|
100 | 100 | def _pl(self): |
|
101 | 101 | try: |
|
102 | 102 | fp = self._opener("dirstate") |
|
103 | 103 | st = fp.read(40) |
|
104 | 104 | fp.close() |
|
105 | 105 | l = len(st) |
|
106 | 106 | if l == 40: |
|
107 | 107 | return st[:20], st[20:40] |
|
108 | 108 | elif l > 0 and l < 40: |
|
109 | 109 | raise util.Abort(_('working directory state appears damaged!')) |
|
110 | 110 | except IOError, err: |
|
111 | 111 | if err.errno != errno.ENOENT: |
|
112 | 112 | raise |
|
113 | 113 | return [nullid, nullid] |
|
114 | 114 | |
|
115 | 115 | @propertycache |
|
116 | 116 | def _dirs(self): |
|
117 | 117 | dirs = {} |
|
118 | 118 | for f, s in self._map.iteritems(): |
|
119 | 119 | if s[0] != 'r': |
|
120 | 120 | _incdirs(dirs, f) |
|
121 | 121 | return dirs |
|
122 | 122 | |
|
123 | 123 | def dirs(self): |
|
124 | 124 | return self._dirs |
|
125 | 125 | |
|
126 | 126 | @rootcache('.hgignore') |
|
127 | 127 | def _ignore(self): |
|
128 | 128 | files = [self._join('.hgignore')] |
|
129 | 129 | for name, path in self._ui.configitems("ui"): |
|
130 | 130 | if name == 'ignore' or name.startswith('ignore.'): |
|
131 | 131 | files.append(util.expandpath(path)) |
|
132 | 132 | return ignore.ignore(self._root, files, self._ui.warn) |
|
133 | 133 | |
|
134 | 134 | @propertycache |
|
135 | 135 | def _slash(self): |
|
136 | 136 | return self._ui.configbool('ui', 'slash') and os.sep != '/' |
|
137 | 137 | |
|
138 | 138 | @propertycache |
|
139 | 139 | def _checklink(self): |
|
140 | 140 | return util.checklink(self._root) |
|
141 | 141 | |
|
142 | 142 | @propertycache |
|
143 | 143 | def _checkexec(self): |
|
144 | 144 | return util.checkexec(self._root) |
|
145 | 145 | |
|
146 | 146 | @propertycache |
|
147 | 147 | def _checkcase(self): |
|
148 | 148 | return not util.checkcase(self._join('.hg')) |
|
149 | 149 | |
|
150 | 150 | def _join(self, f): |
|
151 | 151 | # much faster than os.path.join() |
|
152 | 152 | # it's safe because f is always a relative path |
|
153 | 153 | return self._rootdir + f |
|
154 | 154 | |
|
155 | 155 | def flagfunc(self, buildfallback): |
|
156 | 156 | if self._checklink and self._checkexec: |
|
157 | 157 | def f(x): |
|
158 | 158 | p = self._join(x) |
|
159 | 159 | if os.path.islink(p): |
|
160 | 160 | return 'l' |
|
161 | 161 | if util.isexec(p): |
|
162 | 162 | return 'x' |
|
163 | 163 | return '' |
|
164 | 164 | return f |
|
165 | 165 | |
|
166 | 166 | fallback = buildfallback() |
|
167 | 167 | if self._checklink: |
|
168 | 168 | def f(x): |
|
169 | 169 | if os.path.islink(self._join(x)): |
|
170 | 170 | return 'l' |
|
171 | 171 | if 'x' in fallback(x): |
|
172 | 172 | return 'x' |
|
173 | 173 | return '' |
|
174 | 174 | return f |
|
175 | 175 | if self._checkexec: |
|
176 | 176 | def f(x): |
|
177 | 177 | if 'l' in fallback(x): |
|
178 | 178 | return 'l' |
|
179 | 179 | if util.isexec(self._join(x)): |
|
180 | 180 | return 'x' |
|
181 | 181 | return '' |
|
182 | 182 | return f |
|
183 | 183 | else: |
|
184 | 184 | return fallback |
|
185 | 185 | |
|
186 | 186 | def getcwd(self): |
|
187 | 187 | cwd = os.getcwd() |
|
188 | 188 | if cwd == self._root: |
|
189 | 189 | return '' |
|
190 | 190 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
191 | 191 | rootsep = self._root |
|
192 | 192 | if not util.endswithsep(rootsep): |
|
193 | 193 | rootsep += os.sep |
|
194 | 194 | if cwd.startswith(rootsep): |
|
195 | 195 | return cwd[len(rootsep):] |
|
196 | 196 | else: |
|
197 | 197 | # we're outside the repo. return an absolute path. |
|
198 | 198 | return cwd |
|
199 | 199 | |
|
200 | 200 | def pathto(self, f, cwd=None): |
|
201 | 201 | if cwd is None: |
|
202 | 202 | cwd = self.getcwd() |
|
203 | 203 | path = util.pathto(self._root, cwd, f) |
|
204 | 204 | if self._slash: |
|
205 | 205 | return util.normpath(path) |
|
206 | 206 | return path |
|
207 | 207 | |
|
208 | 208 | def __getitem__(self, key): |
|
209 | 209 | '''Return the current state of key (a filename) in the dirstate. |
|
210 | 210 | |
|
211 | 211 | States are: |
|
212 | 212 | n normal |
|
213 | 213 | m needs merging |
|
214 | 214 | r marked for removal |
|
215 | 215 | a marked for addition |
|
216 | 216 | ? not tracked |
|
217 | 217 | ''' |
|
218 | 218 | return self._map.get(key, ("?",))[0] |
|
219 | 219 | |
|
220 | 220 | def __contains__(self, key): |
|
221 | 221 | return key in self._map |
|
222 | 222 | |
|
223 | 223 | def __iter__(self): |
|
224 | 224 | for x in sorted(self._map): |
|
225 | 225 | yield x |
|
226 | 226 | |
|
227 | 227 | def parents(self): |
|
228 | 228 | return [self._validate(p) for p in self._pl] |
|
229 | 229 | |
|
230 | 230 | def p1(self): |
|
231 | 231 | return self._validate(self._pl[0]) |
|
232 | 232 | |
|
233 | 233 | def p2(self): |
|
234 | 234 | return self._validate(self._pl[1]) |
|
235 | 235 | |
|
236 | 236 | def branch(self): |
|
237 | 237 | return encoding.tolocal(self._branch) |
|
238 | 238 | |
|
239 | 239 | def setparents(self, p1, p2=nullid): |
|
240 | 240 | """Set dirstate parents to p1 and p2. |
|
241 | 241 | |
|
242 | 242 | When moving from two parents to one, 'm' merged entries are
|
243 | 243 | adjusted to normal and previous copy records discarded and |
|
244 | 244 | returned by the call. |
|
245 | 245 | |
|
246 | 246 | See localrepo.setparents() |
|
247 | 247 | """ |
|
248 | 248 | self._dirty = self._dirtypl = True |
|
249 | 249 | oldp2 = self._pl[1] |
|
250 | 250 | self._pl = p1, p2 |
|
251 | 251 | copies = {} |
|
252 | 252 | if oldp2 != nullid and p2 == nullid: |
|
253 | 253 | # Discard 'm' markers when moving away from a merge state |
|
254 | 254 | for f, s in self._map.iteritems(): |
|
255 | 255 | if s[0] == 'm': |
|
256 | 256 | if f in self._copymap: |
|
257 | 257 | copies[f] = self._copymap[f] |
|
258 | 258 | self.normallookup(f) |
|
259 | 259 | return copies |
|
260 | 260 | |
|
261 | 261 | def setbranch(self, branch): |
|
262 | 262 | if branch in ['tip', '.', 'null']: |
|
263 | 263 | raise util.Abort(_('the name \'%s\' is reserved') % branch) |
|
264 | 264 | self._branch = encoding.fromlocal(branch) |
|
265 | 265 | f = self._opener('branch', 'w', atomictemp=True) |
|
266 | 266 | try: |
|
267 | 267 | f.write(self._branch + '\n') |
|
268 | 268 | finally: |
|
269 | 269 | f.close() |
|
270 | 270 | |
|
271 | 271 | def _read(self): |
|
272 | 272 | self._map = {} |
|
273 | 273 | self._copymap = {} |
|
274 | 274 | try: |
|
275 | 275 | st = self._opener.read("dirstate") |
|
276 | 276 | except IOError, err: |
|
277 | 277 | if err.errno != errno.ENOENT: |
|
278 | 278 | raise |
|
279 | 279 | return |
|
280 | 280 | if not st: |
|
281 | 281 | return |
|
282 | 282 | |
|
283 | 283 | p = parsers.parse_dirstate(self._map, self._copymap, st) |
|
284 | 284 | if not self._dirtypl: |
|
285 | 285 | self._pl = p |
|
286 | 286 | |
|
287 | 287 | def invalidate(self): |
|
288 | 288 | for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs", |
|
289 | 289 | "_ignore"): |
|
290 | 290 | if a in self.__dict__: |
|
291 | 291 | delattr(self, a) |
|
292 | 292 | self._lastnormaltime = 0 |
|
293 | 293 | self._dirty = False |
|
294 | 294 | |
|
295 | 295 | def copy(self, source, dest): |
|
296 | 296 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
297 | 297 | if source == dest: |
|
298 | 298 | return |
|
299 | 299 | self._dirty = True |
|
300 | 300 | if source is not None: |
|
301 | 301 | self._copymap[dest] = source |
|
302 | 302 | elif dest in self._copymap: |
|
303 | 303 | del self._copymap[dest] |
|
304 | 304 | |
|
305 | 305 | def copied(self, file): |
|
306 | 306 | return self._copymap.get(file, None) |
|
307 | 307 | |
|
308 | 308 | def copies(self): |
|
309 | 309 | return self._copymap |
|
310 | 310 | |
|
311 | 311 | def _droppath(self, f): |
|
312 | 312 | if self[f] not in "?r" and "_dirs" in self.__dict__: |
|
313 | 313 | _decdirs(self._dirs, f) |
|
314 | 314 | |
|
315 | 315 | def _addpath(self, f, check=False): |
|
316 | 316 | oldstate = self[f] |
|
317 | 317 | if check or oldstate == "r": |
|
318 | 318 | scmutil.checkfilename(f) |
|
319 | 319 | if f in self._dirs: |
|
320 | 320 | raise util.Abort(_('directory %r already in dirstate') % f) |
|
321 | 321 | # shadows |
|
322 | 322 | for d in _finddirs(f): |
|
323 | 323 | if d in self._dirs: |
|
324 | 324 | break |
|
325 | 325 | if d in self._map and self[d] != 'r': |
|
326 | 326 | raise util.Abort( |
|
327 | 327 | _('file %r in dirstate clashes with %r') % (d, f)) |
|
328 | 328 | if oldstate in "?r" and "_dirs" in self.__dict__: |
|
329 | 329 | _incdirs(self._dirs, f) |
|
330 | 330 | |
|
331 | 331 | def normal(self, f): |
|
332 | 332 | '''Mark a file normal and clean.''' |
|
333 | 333 | self._dirty = True |
|
334 | 334 | self._addpath(f) |
|
335 | 335 | s = os.lstat(self._join(f)) |
|
336 | 336 | mtime = int(s.st_mtime) |
|
337 | 337 | self._map[f] = ('n', s.st_mode, s.st_size, mtime) |
|
338 | 338 | if f in self._copymap: |
|
339 | 339 | del self._copymap[f] |
|
340 | 340 | if mtime > self._lastnormaltime: |
|
341 | 341 | # Remember the most recent modification timeslot for status(), |
|
342 | 342 | # to make sure we won't miss future size-preserving file content |
|
343 | 343 | # modifications that happen within the same timeslot. |
|
344 | 344 | self._lastnormaltime = mtime |
|
345 | 345 | |
|
346 | 346 | def normallookup(self, f): |
|
347 | 347 | '''Mark a file normal, but possibly dirty.''' |
|
348 | 348 | if self._pl[1] != nullid and f in self._map: |
|
349 | 349 | # if there is a merge going on and the file was either |
|
350 | 350 | # in state 'm' (-1) or coming from other parent (-2) before |
|
351 | 351 | # being removed, restore that state. |
|
352 | 352 | entry = self._map[f] |
|
353 | 353 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
354 | 354 | source = self._copymap.get(f) |
|
355 | 355 | if entry[2] == -1: |
|
356 | 356 | self.merge(f) |
|
357 | 357 | elif entry[2] == -2: |
|
358 | 358 | self.otherparent(f) |
|
359 | 359 | if source: |
|
360 | 360 | self.copy(source, f) |
|
361 | 361 | return |
|
362 | 362 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
363 | 363 | return |
|
364 | 364 | self._dirty = True |
|
365 | 365 | self._addpath(f) |
|
366 | 366 | self._map[f] = ('n', 0, -1, -1) |
|
367 | 367 | if f in self._copymap: |
|
368 | 368 | del self._copymap[f] |
|
369 | 369 | |
|
370 | 370 | def otherparent(self, f): |
|
371 | 371 | '''Mark as coming from the other parent, always dirty.''' |
|
372 | 372 | if self._pl[1] == nullid: |
|
373 | 373 | raise util.Abort(_("setting %r to other parent " |
|
374 | 374 | "only allowed in merges") % f) |
|
375 | 375 | self._dirty = True |
|
376 | 376 | self._addpath(f) |
|
377 | 377 | self._map[f] = ('n', 0, -2, -1) |
|
378 | 378 | if f in self._copymap: |
|
379 | 379 | del self._copymap[f] |
|
380 | 380 | |
|
381 | 381 | def add(self, f): |
|
382 | 382 | '''Mark a file added.''' |
|
383 | 383 | self._dirty = True |
|
384 | 384 | self._addpath(f, True) |
|
385 | 385 | self._map[f] = ('a', 0, -1, -1) |
|
386 | 386 | if f in self._copymap: |
|
387 | 387 | del self._copymap[f] |
|
388 | 388 | |
|
389 | 389 | def remove(self, f): |
|
390 | 390 | '''Mark a file removed.''' |
|
391 | 391 | self._dirty = True |
|
392 | 392 | self._droppath(f) |
|
393 | 393 | size = 0 |
|
394 | 394 | if self._pl[1] != nullid and f in self._map: |
|
395 | 395 | # backup the previous state |
|
396 | 396 | entry = self._map[f] |
|
397 | 397 | if entry[0] == 'm': # merge |
|
398 | 398 | size = -1 |
|
399 | 399 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
400 | 400 | size = -2 |
|
401 | 401 | self._map[f] = ('r', 0, size, 0) |
|
402 | 402 | if size == 0 and f in self._copymap: |
|
403 | 403 | del self._copymap[f] |
|
404 | 404 | |
|
405 | 405 | def merge(self, f): |
|
406 | 406 | '''Mark a file merged.''' |
|
407 | 407 | if self._pl[1] == nullid: |
|
408 | 408 | return self.normallookup(f) |
|
409 | 409 | self._dirty = True |
|
410 | 410 | s = os.lstat(self._join(f)) |
|
411 | 411 | self._addpath(f) |
|
412 | 412 | self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) |
|
413 | 413 | if f in self._copymap: |
|
414 | 414 | del self._copymap[f] |
|
415 | 415 | |
|
416 | 416 | def drop(self, f): |
|
417 | 417 | '''Drop a file from the dirstate''' |
|
418 | 418 | if f in self._map: |
|
419 | 419 | self._dirty = True |
|
420 | 420 | self._droppath(f) |
|
421 | 421 | del self._map[f] |
|
422 | 422 | |
|
423 | 423 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): |
|
424 | 424 | normed = util.normcase(path) |
|
425 | 425 | folded = self._foldmap.get(normed, None) |
|
426 | 426 | if folded is None: |
|
427 | 427 | if isknown: |
|
428 | 428 | folded = path |
|
429 | 429 | else: |
|
430 | 430 | if exists is None: |
|
431 | 431 | exists = os.path.lexists(os.path.join(self._root, path)) |
|
432 | 432 | if not exists: |
|
433 | 433 | # Maybe a path component exists |
|
434 | 434 | if not ignoremissing and '/' in path: |
|
435 | 435 | d, f = path.rsplit('/', 1) |
|
436 | 436 | d = self._normalize(d, isknown, ignoremissing, None) |
|
437 | 437 | folded = d + "/" + f |
|
438 | 438 | else: |
|
439 | 439 | # No path components, preserve original case |
|
440 | 440 | folded = path |
|
441 | 441 | else: |
|
442 | 442 | # recursively normalize leading directory components |
|
443 | 443 | # against dirstate |
|
444 | 444 | if '/' in normed: |
|
445 | 445 | d, f = normed.rsplit('/', 1) |
|
446 | 446 | d = self._normalize(d, isknown, ignoremissing, True) |
|
447 | 447 | r = self._root + "/" + d |
|
448 | 448 | folded = d + "/" + util.fspath(f, r) |
|
449 | 449 | else: |
|
450 | 450 | folded = util.fspath(normed, self._root) |
|
451 | 451 | self._foldmap[normed] = folded |
|
452 | 452 | |
|
453 | 453 | return folded |
|
454 | 454 | |
|
455 | 455 | def normalize(self, path, isknown=False, ignoremissing=False): |
|
456 | 456 | ''' |
|
457 | 457 | normalize the case of a pathname when on a casefolding filesystem |
|
458 | 458 | |
|
459 | 459 | isknown specifies whether the filename came from walking the |
|
460 | 460 | disk, to avoid extra filesystem access. |
|
461 | 461 | |
|
462 | 462 | If ignoremissing is True, missing paths are returned
|
463 | 463 | unchanged. Otherwise, we try harder to normalize possibly |
|
464 | 464 | existing path components. |
|
465 | 465 | |
|
466 | 466 | The normalized case is determined based on the following precedence: |
|
467 | 467 | |
|
468 | 468 | - version of name already stored in the dirstate |
|
469 | 469 | - version of name stored on disk |
|
470 | 470 | - version provided via command arguments |
|
471 | 471 | ''' |
|
472 | 472 | |
|
473 | 473 | if self._checkcase: |
|
474 | 474 | return self._normalize(path, isknown, ignoremissing) |
|
475 | 475 | return path |
|
476 | 476 | |
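
A toy illustration of the case-folding lookup that normalize() performs, with str.lower standing in for util.normcase and "first spelling seen" standing in for the dirstate/disk precedence described above:

    foldmap = {}

    def fold(path):
        normed = path.lower()
        # the first spelling seen wins; later lookups fold to it
        return foldmap.setdefault(normed, path)

    print fold('Foo/Bar')    # 'Foo/Bar'
    print fold('foo/bar')    # 'Foo/Bar' -- folded to the stored spelling
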
|
477 | 477 | def clear(self): |
|
478 | 478 | self._map = {} |
|
479 | 479 | if "_dirs" in self.__dict__: |
|
480 | 480 | delattr(self, "_dirs") |
|
481 | 481 | self._copymap = {} |
|
482 | 482 | self._pl = [nullid, nullid] |
|
483 | 483 | self._lastnormaltime = 0 |
|
484 | 484 | self._dirty = True |
|
485 | 485 | |
|
486 | 486 | def rebuild(self, parent, files): |
|
487 | 487 | self.clear() |
|
488 | 488 | for f in files: |
|
489 | 489 | if 'x' in files.flags(f): |
|
490 | 490 | self._map[f] = ('n', 0777, -1, 0) |
|
491 | 491 | else: |
|
492 | 492 | self._map[f] = ('n', 0666, -1, 0) |
|
493 | 493 | self._pl = (parent, nullid) |
|
494 | 494 | self._dirty = True |
|
495 | 495 | |
|
496 | 496 | def write(self): |
|
497 | 497 | if not self._dirty: |
|
498 | 498 | return |
|
499 | 499 | st = self._opener("dirstate", "w", atomictemp=True) |
|
500 | 500 | |
|
501 | 501 | # use the modification time of the newly created temporary file as the |
|
502 | 502 | # filesystem's notion of 'now' |
|
503 | 503 | now = int(util.fstat(st).st_mtime) |
|
504 | 504 | |
|
505 | 505 | cs = cStringIO.StringIO() |
|
506 | 506 | copymap = self._copymap |
|
507 | 507 | pack = struct.pack |
|
508 | 508 | write = cs.write |
|
509 | 509 | write("".join(self._pl)) |
|
510 | 510 | for f, e in self._map.iteritems(): |
|
511 | 511 | if e[0] == 'n' and e[3] == now: |
|
512 | 512 | # The file was last modified "simultaneously" with the current |
|
513 | 513 | # write to dirstate (i.e. within the same second for file- |
|
514 | 514 | # systems with a granularity of 1 sec). This commonly happens |
|
515 | 515 | # for at least a couple of files on 'update'. |
|
516 | 516 | # The user could change the file without changing its size |
|
517 | 517 | # within the same second. Invalidate the file's stat data in |
|
518 | 518 | # dirstate, forcing future 'status' calls to compare the |
|
519 | 519 | # contents of the file. This prevents mistakenly treating such |
|
520 | 520 | # files as clean. |
|
521 | 521 | e = (e[0], 0, -1, -1) # mark entry as 'unset' |
|
522 | 522 | self._map[f] = e |
|
523 | 523 | |
|
524 | 524 | if f in copymap: |
|
525 | 525 | f = "%s\0%s" % (f, copymap[f]) |
|
526 | 526 | e = pack(_format, e[0], e[1], e[2], e[3], len(f)) |
|
527 | 527 | write(e) |
|
528 | 528 | write(f) |
|
529 | 529 | st.write(cs.getvalue()) |
|
530 | 530 | st.close() |
|
531 | 531 | self._lastnormaltime = 0 |
|
532 | 532 | self._dirty = self._dirtypl = False |
|
533 | 533 | |
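
A sketch of the on-disk record layout implied by the pack() call in write() above. The real format string is defined elsewhere in this module and is not shown here; '>cllll' (state byte, mode, size, mtime, filename length, big-endian) is an assumption:

    import struct

    fmt = '>cllll'                 # assumed dirstate record format
    name = 'foo/bar.txt'
    rec = struct.pack(fmt, 'n', 0644, 12, 1325376000, len(name)) + name
    hdr = struct.calcsize(fmt)
    state, mode, size, mtime, flen = struct.unpack(fmt, rec[:hdr])
    print state, oct(mode), size, mtime, rec[hdr:hdr + flen]
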
|
534 | 534 | def _dirignore(self, f): |
|
535 | 535 | if f == '.': |
|
536 | 536 | return False |
|
537 | 537 | if self._ignore(f): |
|
538 | 538 | return True |
|
539 | 539 | for p in _finddirs(f): |
|
540 | 540 | if self._ignore(p): |
|
541 | 541 | return True |
|
542 | 542 | return False |
|
543 | 543 | |
|
544 | 544 | def walk(self, match, subrepos, unknown, ignored): |
|
545 | 545 | ''' |
|
546 | 546 | Walk recursively through the directory tree, finding all files |
|
547 | 547 | matched by match. |
|
548 | 548 | |
|
549 | 549 | Return a dict mapping filename to stat-like object (either |
|
550 | 550 | mercurial.osutil.stat instance or return value of os.stat()). |
|
551 | 551 | ''' |
|
552 | 552 | |
|
553 | 553 | def fwarn(f, msg): |
|
554 | 554 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
555 | 555 | return False |
|
556 | 556 | |
|
557 | 557 | def badtype(mode): |
|
558 | 558 | kind = _('unknown') |
|
559 | 559 | if stat.S_ISCHR(mode): |
|
560 | 560 | kind = _('character device') |
|
561 | 561 | elif stat.S_ISBLK(mode): |
|
562 | 562 | kind = _('block device') |
|
563 | 563 | elif stat.S_ISFIFO(mode): |
|
564 | 564 | kind = _('fifo') |
|
565 | 565 | elif stat.S_ISSOCK(mode): |
|
566 | 566 | kind = _('socket') |
|
567 | 567 | elif stat.S_ISDIR(mode): |
|
568 | 568 | kind = _('directory') |
|
569 | 569 | return _('unsupported file type (type is %s)') % kind |
|
570 | 570 | |
|
571 | 571 | ignore = self._ignore |
|
572 | 572 | dirignore = self._dirignore |
|
573 | 573 | if ignored: |
|
574 | 574 | ignore = util.never |
|
575 | 575 | dirignore = util.never |
|
576 | 576 | elif not unknown: |
|
577 | 577 | # if unknown and ignored are False, skip step 2 |
|
578 | 578 | ignore = util.always |
|
579 | 579 | dirignore = util.always |
|
580 | 580 | |
|
581 | 581 | matchfn = match.matchfn |
|
582 | 582 | badfn = match.bad |
|
583 | 583 | dmap = self._map |
|
584 | 584 | normpath = util.normpath |
|
585 | 585 | listdir = osutil.listdir |
|
586 | 586 | lstat = os.lstat |
|
587 | 587 | getkind = stat.S_IFMT |
|
588 | 588 | dirkind = stat.S_IFDIR |
|
589 | 589 | regkind = stat.S_IFREG |
|
590 | 590 | lnkkind = stat.S_IFLNK |
|
591 | 591 | join = self._join |
|
592 | 592 | work = [] |
|
593 | 593 | wadd = work.append |
|
594 | 594 | |
|
595 | 595 | exact = skipstep3 = False |
|
596 | 596 | if matchfn == match.exact: # match.exact |
|
597 | 597 | exact = True |
|
598 | 598 | dirignore = util.always # skip step 2 |
|
599 | 599 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
600 | 600 | skipstep3 = True |
|
601 | 601 | |
|
602 | 602 | if not exact and self._checkcase: |
|
603 | 603 | normalize = self._normalize |
|
604 | 604 | skipstep3 = False |
|
605 | 605 | else: |
|
606 | 606 | normalize = lambda x, y, z: x |
|
607 | 607 | |
|
608 | 608 | files = sorted(match.files()) |
|
609 | 609 | subrepos.sort() |
|
610 | 610 | i, j = 0, 0 |
|
611 | 611 | while i < len(files) and j < len(subrepos): |
|
612 | 612 | subpath = subrepos[j] + "/" |
|
613 | 613 | if files[i] < subpath: |
|
614 | 614 | i += 1 |
|
615 | 615 | continue |
|
616 | 616 | while i < len(files) and files[i].startswith(subpath): |
|
617 | 617 | del files[i] |
|
618 | 618 | j += 1 |
|
619 | 619 | |
|
620 | 620 | if not files or '.' in files: |
|
621 | 621 | files = [''] |
|
622 | 622 | results = dict.fromkeys(subrepos) |
|
623 | 623 | results['.hg'] = None |
|
624 | 624 | |
|
625 | 625 | # step 1: find all explicit files |
|
626 | 626 | for ff in files: |
|
627 | 627 | nf = normalize(normpath(ff), False, True) |
|
628 | 628 | if nf in results: |
|
629 | 629 | continue |
|
630 | 630 | |
|
631 | 631 | try: |
|
632 | 632 | st = lstat(join(nf)) |
|
633 | 633 | kind = getkind(st.st_mode) |
|
634 | 634 | if kind == dirkind: |
|
635 | 635 | skipstep3 = False |
|
636 | 636 | if nf in dmap: |
|
637 | 637 | #file deleted on disk but still in dirstate |
|
638 | 638 | results[nf] = None |
|
639 | 639 | match.dir(nf) |
|
640 | 640 | if not dirignore(nf): |
|
641 | 641 | wadd(nf) |
|
642 | 642 | elif kind == regkind or kind == lnkkind: |
|
643 | 643 | results[nf] = st |
|
644 | 644 | else: |
|
645 | 645 | badfn(ff, badtype(kind)) |
|
646 | 646 | if nf in dmap: |
|
647 | 647 | results[nf] = None |
|
648 | 648 | except OSError, inst: |
|
649 | 649 | if nf in dmap: # does it exactly match a file? |
|
650 | 650 | results[nf] = None |
|
651 | 651 | else: # does it match a directory? |
|
652 | 652 | prefix = nf + "/" |
|
653 | 653 | for fn in dmap: |
|
654 | 654 | if fn.startswith(prefix): |
|
655 | 655 | match.dir(nf) |
|
656 | 656 | skipstep3 = False |
|
657 | 657 | break |
|
658 | 658 | else: |
|
659 | 659 | badfn(ff, inst.strerror) |
|
660 | 660 | |
|
661 | 661 | # step 2: visit subdirectories |
|
662 | 662 | while work: |
|
663 | 663 | nd = work.pop() |
|
664 | 664 | skip = None |
|
665 | 665 | if nd == '.': |
|
666 | 666 | nd = '' |
|
667 | 667 | else: |
|
668 | 668 | skip = '.hg' |
|
669 | 669 | try: |
|
670 | 670 | entries = listdir(join(nd), stat=True, skip=skip) |
|
671 | 671 | except OSError, inst: |
|
672 | 672 | if inst.errno == errno.EACCES: |
|
673 | 673 | fwarn(nd, inst.strerror) |
|
674 | 674 | continue |
|
675 | 675 | raise |
|
676 | 676 | for f, kind, st in entries: |
|
677 | 677 | nf = normalize(nd and (nd + "/" + f) or f, True, True) |
|
678 | 678 | if nf not in results: |
|
679 | 679 | if kind == dirkind: |
|
680 | 680 | if not ignore(nf): |
|
681 | 681 | match.dir(nf) |
|
682 | 682 | wadd(nf) |
|
683 | 683 | if nf in dmap and matchfn(nf): |
|
684 | 684 | results[nf] = None |
|
685 | 685 | elif kind == regkind or kind == lnkkind: |
|
686 | 686 | if nf in dmap: |
|
687 | 687 | if matchfn(nf): |
|
688 | 688 | results[nf] = st |
|
689 | 689 | elif matchfn(nf) and not ignore(nf): |
|
690 | 690 | results[nf] = st |
|
691 | 691 | elif nf in dmap and matchfn(nf): |
|
692 | 692 | results[nf] = None |
|
693 | 693 | |
|
694 | 694 | # step 3: report unseen items in the dmap hash |
|
695 | 695 | if not skipstep3 and not exact: |
|
696 | 696 | visit = sorted([f for f in dmap if f not in results and matchfn(f)]) |
|
697 | 697 | for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): |
|
698 | 698 | if (not st is None and |
|
699 |     | not getkind(st.st_mode) in (regkind, lnkkind)):

    | 699 | getkind(st.st_mode) not in (regkind, lnkkind)):
|
700 | 700 | st = None |
|
701 | 701 | results[nf] = st |
|
702 | 702 | for s in subrepos: |
|
703 | 703 | del results[s] |
|
704 | 704 | del results['.hg'] |
|
705 | 705 | return results |
|
706 | 706 | |
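
A miniature of step 2's directory scan above, assuming only plain os APIs (no ignore rules, matching, case normalization, or symlink handling):

    import os

    def tinywalk(root):
        results = {}
        work = ['']                       # directories still to visit
        while work:
            nd = work.pop()
            for f in os.listdir(os.path.join(root, nd)):
                nf = nd and (nd + '/' + f) or f
                if os.path.isdir(os.path.join(root, nf)):
                    work.append(nf)       # visit subdirectory later
                else:
                    results[nf] = os.lstat(os.path.join(root, nf))
        return results
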
|
707 | 707 | def status(self, match, subrepos, ignored, clean, unknown): |
|
708 | 708 | '''Determine the status of the working copy relative to the |
|
709 | 709 | dirstate and return a tuple of lists (unsure, modified, added, |
|
710 | 710 | removed, deleted, unknown, ignored, clean), where: |
|
711 | 711 | |
|
712 | 712 | unsure: |
|
713 | 713 | files that might have been modified since the dirstate was |
|
714 | 714 | written, but need to be read to be sure (size is the same |
|
715 | 715 | but mtime differs) |
|
716 | 716 | modified: |
|
717 | 717 | files that have definitely been modified since the dirstate |
|
718 | 718 | was written (different size or mode) |
|
719 | 719 | added: |
|
720 | 720 | files that have been explicitly added with hg add |
|
721 | 721 | removed: |
|
722 | 722 | files that have been explicitly removed with hg remove |
|
723 | 723 | deleted: |
|
724 | 724 | files that have been deleted through other means ("missing") |
|
725 | 725 | unknown: |
|
726 | 726 | files not in the dirstate that are not ignored |
|
727 | 727 | ignored: |
|
728 | 728 | files not in the dirstate that are ignored |
|
729 | 729 | (by _dirignore()) |
|
730 | 730 | clean: |
|
731 | 731 | files that have definitely not been modified since the |
|
732 | 732 | dirstate was written |
|
733 | 733 | ''' |
|
734 | 734 | listignored, listclean, listunknown = ignored, clean, unknown |
|
735 | 735 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
736 | 736 | removed, deleted, clean = [], [], [] |
|
737 | 737 | |
|
738 | 738 | dmap = self._map |
|
739 | 739 | ladd = lookup.append # aka "unsure" |
|
740 | 740 | madd = modified.append |
|
741 | 741 | aadd = added.append |
|
742 | 742 | uadd = unknown.append |
|
743 | 743 | iadd = ignored.append |
|
744 | 744 | radd = removed.append |
|
745 | 745 | dadd = deleted.append |
|
746 | 746 | cadd = clean.append |
|
747 | 747 | |
|
748 | 748 | lnkkind = stat.S_IFLNK |
|
749 | 749 | |
|
750 | 750 | for fn, st in self.walk(match, subrepos, listunknown, |
|
751 | 751 | listignored).iteritems(): |
|
752 | 752 | if fn not in dmap: |
|
753 | 753 | if (listignored or match.exact(fn)) and self._dirignore(fn): |
|
754 | 754 | if listignored: |
|
755 | 755 | iadd(fn) |
|
756 | 756 | elif listunknown: |
|
757 | 757 | uadd(fn) |
|
758 | 758 | continue |
|
759 | 759 | |
|
760 | 760 | state, mode, size, time = dmap[fn] |
|
761 | 761 | |
|
762 | 762 | if not st and state in "nma": |
|
763 | 763 | dadd(fn) |
|
764 | 764 | elif state == 'n': |
|
765 | 765 | # The "mode & lnkkind != lnkkind or self._checklink" |
|
766 | 766 | # lines are an expansion of "islink => checklink" |
|
767 | 767 | # where islink means "is this a link?" and checklink |
|
768 | 768 | # means "can we check links?". |
|
769 | 769 | mtime = int(st.st_mtime) |
|
770 | 770 | if (size >= 0 and |
|
771 | 771 | (size != st.st_size |
|
772 | 772 | or ((mode ^ st.st_mode) & 0100 and self._checkexec)) |
|
773 | 773 | and (mode & lnkkind != lnkkind or self._checklink) |
|
774 | 774 | or size == -2 # other parent |
|
775 | 775 | or fn in self._copymap): |
|
776 | 776 | madd(fn) |
|
777 | 777 | elif (mtime != time |
|
778 | 778 | and (mode & lnkkind != lnkkind or self._checklink)): |
|
779 | 779 | ladd(fn) |
|
780 | 780 | elif mtime == self._lastnormaltime: |
|
781 | 781 | # fn may have been changed in the same timeslot without |
|
782 | 782 | # changing its size. This can happen if we quickly do |
|
783 | 783 | # multiple commits in a single transaction. |
|
784 | 784 | # Force lookup, so we don't miss such a racy file change. |
|
785 | 785 | ladd(fn) |
|
786 | 786 | elif listclean: |
|
787 | 787 | cadd(fn) |
|
788 | 788 | elif state == 'm': |
|
789 | 789 | madd(fn) |
|
790 | 790 | elif state == 'a': |
|
791 | 791 | aadd(fn) |
|
792 | 792 | elif state == 'r': |
|
793 | 793 | radd(fn) |
|
794 | 794 | |
|
795 | 795 | return (lookup, modified, added, removed, deleted, unknown, ignored, |
|
796 | 796 | clean) |
@@ -1,789 +1,789 b'' | |||
|
1 | 1 | # dispatch.py - command dispatching for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re |
|
10 | 10 | import util, commands, hg, fancyopts, extensions, hook, error |
|
11 | 11 | import cmdutil, encoding |
|
12 | 12 | import ui as uimod |
|
13 | 13 | |
|
14 | 14 | class request(object): |
|
15 | 15 | def __init__(self, args, ui=None, repo=None, fin=None, fout=None, |
|
16 | 16 | ferr=None): |
|
17 | 17 | self.args = args |
|
18 | 18 | self.ui = ui |
|
19 | 19 | self.repo = repo |
|
20 | 20 | |
|
21 | 21 | # input/output/error streams |
|
22 | 22 | self.fin = fin |
|
23 | 23 | self.fout = fout |
|
24 | 24 | self.ferr = ferr |
|
25 | 25 | |
|
26 | 26 | def run(): |
|
27 | 27 | "run the command in sys.argv" |
|
28 | 28 | sys.exit((dispatch(request(sys.argv[1:])) or 0) & 255) |
|
29 | 29 | |
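
Besides run(), an embedder can drive dispatch() with a hand-built request; a hypothetical sketch, assuming this module is importable as mercurial.dispatch:

    from StringIO import StringIO
    from mercurial.dispatch import dispatch, request

    out = StringIO()
    ret = dispatch(request(['version'], fout=out))   # capture command output
    print ret, out.getvalue()
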
|
30 | 30 | def dispatch(req): |
|
31 | 31 | "run the command specified in req.args" |
|
32 | 32 | if req.ferr: |
|
33 | 33 | ferr = req.ferr |
|
34 | 34 | elif req.ui: |
|
35 | 35 | ferr = req.ui.ferr |
|
36 | 36 | else: |
|
37 | 37 | ferr = sys.stderr |
|
38 | 38 | |
|
39 | 39 | try: |
|
40 | 40 | if not req.ui: |
|
41 | 41 | req.ui = uimod.ui() |
|
42 | 42 | if '--traceback' in req.args: |
|
43 | 43 | req.ui.setconfig('ui', 'traceback', 'on') |
|
44 | 44 | |
|
45 | 45 | # set ui streams from the request |
|
46 | 46 | if req.fin: |
|
47 | 47 | req.ui.fin = req.fin |
|
48 | 48 | if req.fout: |
|
49 | 49 | req.ui.fout = req.fout |
|
50 | 50 | if req.ferr: |
|
51 | 51 | req.ui.ferr = req.ferr |
|
52 | 52 | except util.Abort, inst: |
|
53 | 53 | ferr.write(_("abort: %s\n") % inst) |
|
54 | 54 | if inst.hint: |
|
55 | 55 | ferr.write(_("(%s)\n") % inst.hint) |
|
56 | 56 | return -1 |
|
57 | 57 | except error.ParseError, inst: |
|
58 | 58 | if len(inst.args) > 1: |
|
59 | 59 | ferr.write(_("hg: parse error at %s: %s\n") % |
|
60 | 60 | (inst.args[1], inst.args[0])) |
|
61 | 61 | else: |
|
62 | 62 | ferr.write(_("hg: parse error: %s\n") % inst.args[0]) |
|
63 | 63 | return -1 |
|
64 | 64 | |
|
65 | 65 | return _runcatch(req) |
|
66 | 66 | |
|
67 | 67 | def _runcatch(req): |
|
68 | 68 | def catchterm(*args): |
|
69 | 69 | raise error.SignalInterrupt |
|
70 | 70 | |
|
71 | 71 | ui = req.ui |
|
72 | 72 | try: |
|
73 | 73 | for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM': |
|
74 | 74 | num = getattr(signal, name, None) |
|
75 | 75 | if num: |
|
76 | 76 | signal.signal(num, catchterm) |
|
77 | 77 | except ValueError: |
|
78 | 78 | pass # happens if called in a thread |
|
79 | 79 | |
|
80 | 80 | try: |
|
81 | 81 | try: |
|
82 | 82 | # enter the debugger before command execution |
|
83 | 83 | if '--debugger' in req.args: |
|
84 | 84 | ui.warn(_("entering debugger - " |
|
85 | 85 | "type c to continue starting hg or h for help\n")) |
|
86 | 86 | pdb.set_trace() |
|
87 | 87 | try: |
|
88 | 88 | return _dispatch(req) |
|
89 | 89 | finally: |
|
90 | 90 | ui.flush() |
|
91 | 91 | except: |
|
92 | 92 | # enter the debugger when we hit an exception |
|
93 | 93 | if '--debugger' in req.args: |
|
94 | 94 | traceback.print_exc() |
|
95 | 95 | pdb.post_mortem(sys.exc_info()[2]) |
|
96 | 96 | ui.traceback() |
|
97 | 97 | raise |
|
98 | 98 | |
|
99 | 99 | # Global exception handling, alphabetically |
|
100 | 100 | # Mercurial-specific first, followed by built-in and library exceptions |
|
101 | 101 | except error.AmbiguousCommand, inst: |
|
102 | 102 | ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") % |
|
103 | 103 | (inst.args[0], " ".join(inst.args[1]))) |
|
104 | 104 | except error.ParseError, inst: |
|
105 | 105 | if len(inst.args) > 1: |
|
106 | 106 | ui.warn(_("hg: parse error at %s: %s\n") % |
|
107 | 107 | (inst.args[1], inst.args[0])) |
|
108 | 108 | else: |
|
109 | 109 | ui.warn(_("hg: parse error: %s\n") % inst.args[0]) |
|
110 | 110 | return -1 |
|
111 | 111 | except error.LockHeld, inst: |
|
112 | 112 | if inst.errno == errno.ETIMEDOUT: |
|
113 | 113 | reason = _('timed out waiting for lock held by %s') % inst.locker |
|
114 | 114 | else: |
|
115 | 115 | reason = _('lock held by %s') % inst.locker |
|
116 | 116 | ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) |
|
117 | 117 | except error.LockUnavailable, inst: |
|
118 | 118 | ui.warn(_("abort: could not lock %s: %s\n") % |
|
119 | 119 | (inst.desc or inst.filename, inst.strerror)) |
|
120 | 120 | except error.CommandError, inst: |
|
121 | 121 | if inst.args[0]: |
|
122 | 122 | ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) |
|
123 | 123 | commands.help_(ui, inst.args[0], full=False, command=True) |
|
124 | 124 | else: |
|
125 | 125 | ui.warn(_("hg: %s\n") % inst.args[1]) |
|
126 | 126 | commands.help_(ui, 'shortlist') |
|
127 | 127 | except error.OutOfBandError, inst: |
|
128 | 128 | ui.warn(_("abort: remote error:\n")) |
|
129 | 129 | ui.warn(''.join(inst.args)) |
|
130 | 130 | except error.RepoError, inst: |
|
131 | 131 | ui.warn(_("abort: %s!\n") % inst) |
|
132 | 132 | if inst.hint: |
|
133 | 133 | ui.warn(_("(%s)\n") % inst.hint) |
|
134 | 134 | except error.ResponseError, inst: |
|
135 | 135 | ui.warn(_("abort: %s") % inst.args[0]) |
|
136 | 136 | if not isinstance(inst.args[1], basestring): |
|
137 | 137 | ui.warn(" %r\n" % (inst.args[1],)) |
|
138 | 138 | elif not inst.args[1]: |
|
139 | 139 | ui.warn(_(" empty string\n")) |
|
140 | 140 | else: |
|
141 | 141 | ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) |
|
142 | 142 | except error.RevlogError, inst: |
|
143 | 143 | ui.warn(_("abort: %s!\n") % inst) |
|
144 | 144 | except error.SignalInterrupt: |
|
145 | 145 | ui.warn(_("killed!\n")) |
|
146 | 146 | except error.UnknownCommand, inst: |
|
147 | 147 | ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) |
|
148 | 148 | try: |
|
149 | 149 | # check if the command is in a disabled extension |
|
150 | 150 | # (but don't check for extensions themselves) |
|
151 | 151 | commands.help_(ui, inst.args[0], unknowncmd=True) |
|
152 | 152 | except error.UnknownCommand: |
|
153 | 153 | commands.help_(ui, 'shortlist') |
|
154 | 154 | except util.Abort, inst: |
|
155 | 155 | ui.warn(_("abort: %s\n") % inst) |
|
156 | 156 | if inst.hint: |
|
157 | 157 | ui.warn(_("(%s)\n") % inst.hint) |
|
158 | 158 | except ImportError, inst: |
|
159 | 159 | ui.warn(_("abort: %s!\n") % inst) |
|
160 | 160 | m = str(inst).split()[-1] |
|
161 | 161 | if m in "mpatch bdiff".split(): |
|
162 | 162 | ui.warn(_("(did you forget to compile extensions?)\n")) |
|
163 | 163 | elif m in "zlib".split(): |
|
164 | 164 | ui.warn(_("(is your Python install correct?)\n")) |
|
165 | 165 | except IOError, inst: |
|
166 | 166 | if util.safehasattr(inst, "code"): |
|
167 | 167 | ui.warn(_("abort: %s\n") % inst) |
|
168 | 168 | elif util.safehasattr(inst, "reason"): |
|
169 | 169 | try: # usually it is in the form (errno, strerror) |
|
170 | 170 | reason = inst.reason.args[1] |
|
171 | 171 | except (AttributeError, IndexError): |
|
172 | 172 | # it might be anything, for example a string |
|
173 | 173 | reason = inst.reason |
|
174 | 174 | ui.warn(_("abort: error: %s\n") % reason) |
|
175 | 175 | elif util.safehasattr(inst, "args") and inst.args[0] == errno.EPIPE: |
|
176 | 176 | if ui.debugflag: |
|
177 | 177 | ui.warn(_("broken pipe\n")) |
|
178 | 178 | elif getattr(inst, "strerror", None): |
|
179 | 179 | if getattr(inst, "filename", None): |
|
180 | 180 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) |
|
181 | 181 | else: |
|
182 | 182 | ui.warn(_("abort: %s\n") % inst.strerror) |
|
183 | 183 | else: |
|
184 | 184 | raise |
|
185 | 185 | except OSError, inst: |
|
186 | 186 | if getattr(inst, "filename", None): |
|
187 | 187 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) |
|
188 | 188 | else: |
|
189 | 189 | ui.warn(_("abort: %s\n") % inst.strerror) |
|
190 | 190 | except KeyboardInterrupt: |
|
191 | 191 | try: |
|
192 | 192 | ui.warn(_("interrupted!\n")) |
|
193 | 193 | except IOError, inst: |
|
194 | 194 | if inst.errno == errno.EPIPE: |
|
195 | 195 | if ui.debugflag: |
|
196 | 196 | ui.warn(_("\nbroken pipe\n")) |
|
197 | 197 | else: |
|
198 | 198 | raise |
|
199 | 199 | except MemoryError: |
|
200 | 200 | ui.warn(_("abort: out of memory\n")) |
|
201 | 201 | except SystemExit, inst: |
|
202 | 202 | # Commands shouldn't sys.exit directly, but give a return code. |
|
203 | 203 | # Just in case, catch this and pass the exit code to the caller.
|
204 | 204 | return inst.code |
|
205 | 205 | except socket.error, inst: |
|
206 | 206 | ui.warn(_("abort: %s\n") % inst.args[-1]) |
|
207 | 207 | except: |
|
208 | 208 | ui.warn(_("** unknown exception encountered," |
|
209 | 209 | " please report by visiting\n")) |
|
210 | 210 | ui.warn(_("** http://mercurial.selenic.com/wiki/BugTracker\n")) |
|
211 | 211 | ui.warn(_("** Python %s\n") % sys.version.replace('\n', '')) |
|
212 | 212 | ui.warn(_("** Mercurial Distributed SCM (version %s)\n") |
|
213 | 213 | % util.version()) |
|
214 | 214 | ui.warn(_("** Extensions loaded: %s\n") |
|
215 | 215 | % ", ".join([x[0] for x in extensions.extensions()])) |
|
216 | 216 | raise |
|
217 | 217 | |
|
218 | 218 | return -1 |
|
219 | 219 | |
|
220 | 220 | def aliasargs(fn, givenargs): |
|
221 | 221 | args = getattr(fn, 'args', []) |
|
222 | 222 | if args: |
|
223 | 223 | cmd = ' '.join(map(util.shellquote, args)) |
|
224 | 224 | |
|
225 | 225 | nums = [] |
|
226 | 226 | def replacer(m): |
|
227 | 227 | num = int(m.group(1)) - 1 |
|
228 | 228 | nums.append(num) |
|
229 | 229 | if num < len(givenargs): |
|
230 | 230 | return givenargs[num] |
|
231 | 231 | raise util.Abort(_('too few arguments for command alias')) |
|
232 | 232 | cmd = re.sub(r'\$(\d+|\$)', replacer, cmd) |
|
233 | 233 | givenargs = [x for i, x in enumerate(givenargs) |
|
234 | 234 | if i not in nums] |
|
235 | 235 | args = shlex.split(cmd) |
|
236 | 236 | return args + givenargs |
|
237 | 237 | |
|
238 | 238 | class cmdalias(object): |
|
239 | 239 | def __init__(self, name, definition, cmdtable): |
|
240 | 240 | self.name = self.cmd = name |
|
241 | 241 | self.cmdname = '' |
|
242 | 242 | self.definition = definition |
|
243 | 243 | self.args = [] |
|
244 | 244 | self.opts = [] |
|
245 | 245 | self.help = '' |
|
246 | 246 | self.norepo = True |
|
247 | 247 | self.optionalrepo = False |
|
248 | 248 | self.badalias = False |
|
249 | 249 | |
|
250 | 250 | try: |
|
251 | 251 | aliases, entry = cmdutil.findcmd(self.name, cmdtable) |
|
252 | 252 | for alias, e in cmdtable.iteritems(): |
|
253 | 253 | if e is entry: |
|
254 | 254 | self.cmd = alias |
|
255 | 255 | break |
|
256 | 256 | self.shadows = True |
|
257 | 257 | except error.UnknownCommand: |
|
258 | 258 | self.shadows = False |
|
259 | 259 | |
|
260 | 260 | if not self.definition: |
|
261 | 261 | def fn(ui, *args): |
|
262 | 262 | ui.warn(_("no definition for alias '%s'\n") % self.name) |
|
263 | 263 | return 1 |
|
264 | 264 | self.fn = fn |
|
265 | 265 | self.badalias = True |
|
266 | 266 | return |
|
267 | 267 | |
|
268 | 268 | if self.definition.startswith('!'): |
|
269 | 269 | self.shell = True |
|
270 | 270 | def fn(ui, *args): |
|
271 | 271 | env = {'HG_ARGS': ' '.join((self.name,) + args)} |
|
272 | 272 | def _checkvar(m): |
|
273 | 273 | if m.groups()[0] == '$': |
|
274 | 274 | return m.group() |
|
275 | 275 | elif int(m.groups()[0]) <= len(args): |
|
276 | 276 | return m.group() |
|
277 | 277 | else: |
|
278 | 278 | ui.debug("No argument found for substitution " |
|
279 | 279 | "of %i variable in alias '%s' definition." |
|
280 | 280 | % (int(m.groups()[0]), self.name)) |
|
281 | 281 | return '' |
|
282 | 282 | cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:]) |
|
283 | 283 | replace = dict((str(i + 1), arg) for i, arg in enumerate(args)) |
|
284 | 284 | replace['0'] = self.name |
|
285 | 285 | replace['@'] = ' '.join(args) |
|
286 | 286 | cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True) |
|
287 | 287 | return util.system(cmd, environ=env, out=ui.fout) |
|
288 | 288 | self.fn = fn |
|
289 | 289 | return |
|
290 | 290 | |
|
291 | 291 | args = shlex.split(self.definition) |
|
292 | 292 | self.cmdname = cmd = args.pop(0) |
|
293 | 293 | args = map(util.expandpath, args) |
|
294 | 294 | |
|
295 | 295 | for invalidarg in ("--cwd", "-R", "--repository", "--repo"): |
|
296 | 296 | if _earlygetopt([invalidarg], args): |
|
297 | 297 | def fn(ui, *args): |
|
298 | 298 | ui.warn(_("error in definition for alias '%s': %s may only " |
|
299 | 299 | "be given on the command line\n") |
|
300 | 300 | % (self.name, invalidarg)) |
|
301 | 301 | return 1 |
|
302 | 302 | |
|
303 | 303 | self.fn = fn |
|
304 | 304 | self.badalias = True |
|
305 | 305 | return |
|
306 | 306 | |
|
307 | 307 | try: |
|
308 | 308 | tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1] |
|
309 | 309 | if len(tableentry) > 2: |
|
310 | 310 | self.fn, self.opts, self.help = tableentry |
|
311 | 311 | else: |
|
312 | 312 | self.fn, self.opts = tableentry |
|
313 | 313 | |
|
314 | 314 | self.args = aliasargs(self.fn, args) |
|
315 | 315 | if cmd not in commands.norepo.split(' '): |
|
316 | 316 | self.norepo = False |
|
317 | 317 | if cmd in commands.optionalrepo.split(' '): |
|
318 | 318 | self.optionalrepo = True |
|
319 | 319 | if self.help.startswith("hg " + cmd): |
|
320 | 320 | # drop prefix in old-style help lines so hg shows the alias |
|
321 | 321 | self.help = self.help[4 + len(cmd):] |
|
322 | 322 | self.__doc__ = self.fn.__doc__ |
|
323 | 323 | |
|
324 | 324 | except error.UnknownCommand: |
|
325 | 325 | def fn(ui, *args): |
|
326 | 326 | ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \ |
|
327 | 327 | % (self.name, cmd)) |
|
328 | 328 | try: |
|
329 | 329 | # check if the command is in a disabled extension |
|
330 | 330 | commands.help_(ui, cmd, unknowncmd=True) |
|
331 | 331 | except error.UnknownCommand: |
|
332 | 332 | pass |
|
333 | 333 | return 1 |
|
334 | 334 | self.fn = fn |
|
335 | 335 | self.badalias = True |
|
336 | 336 | except error.AmbiguousCommand: |
|
337 | 337 | def fn(ui, *args): |
|
338 | 338 | ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \ |
|
339 | 339 | % (self.name, cmd)) |
|
340 | 340 | return 1 |
|
341 | 341 | self.fn = fn |
|
342 | 342 | self.badalias = True |
|
343 | 343 | |
|
344 | 344 | def __call__(self, ui, *args, **opts): |
|
345 | 345 | if self.shadows: |
|
346 | 346 | ui.debug("alias '%s' shadows command '%s'\n" % |
|
347 | 347 | (self.name, self.cmdname)) |
|
348 | 348 | |
|
349 | 349 | if util.safehasattr(self, 'shell'): |
|
350 | 350 | return self.fn(ui, *args, **opts) |
|
351 | 351 | else: |
|
352 | 352 | try: |
|
353 | 353 | util.checksignature(self.fn)(ui, *args, **opts) |
|
354 | 354 | except error.SignatureError: |
|
355 | 355 | args = ' '.join([self.cmdname] + self.args) |
|
356 | 356 | ui.debug("alias '%s' expands to '%s'\n" % (self.name, args)) |
|
357 | 357 | raise |
|
358 | 358 | |
|
359 | 359 | def addaliases(ui, cmdtable): |
|
360 | 360 | # aliases are processed after extensions have been loaded, so they |
|
361 | 361 | # may use extension commands. Aliases can also use other alias definitions, |
|
362 | 362 | # but only if they have been defined prior to the current definition. |
|
363 | 363 | for alias, definition in ui.configitems('alias'): |
|
364 | 364 | aliasdef = cmdalias(alias, definition, cmdtable) |
|
365 | 365 | |
|
366 | 366 | try: |
|
367 | 367 | olddef = cmdtable[aliasdef.cmd][0] |
|
368 | 368 | if olddef.definition == aliasdef.definition: |
|
369 | 369 | continue |
|
370 | 370 | except (KeyError, AttributeError): |
|
371 | 371 | # definition might not exist or it might not be a cmdalias |
|
372 | 372 | pass |
|
373 | 373 | |
|
374 | 374 | cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help) |
|
375 | 375 | if aliasdef.norepo: |
|
376 | 376 | commands.norepo += ' %s' % alias |
|
377 | 377 | if aliasdef.optionalrepo: |
|
378 | 378 | commands.optionalrepo += ' %s' % alias |
|
379 | 379 | |
|
380 | 380 | def _parse(ui, args): |
|
381 | 381 | options = {} |
|
382 | 382 | cmdoptions = {} |
|
383 | 383 | |
|
384 | 384 | try: |
|
385 | 385 | args = fancyopts.fancyopts(args, commands.globalopts, options) |
|
386 | 386 | except fancyopts.getopt.GetoptError, inst: |
|
387 | 387 | raise error.CommandError(None, inst) |
|
388 | 388 | |
|
389 | 389 | if args: |
|
390 | 390 | cmd, args = args[0], args[1:] |
|
391 | 391 | aliases, entry = cmdutil.findcmd(cmd, commands.table, |
|
392 | 392 | ui.configbool("ui", "strict")) |
|
393 | 393 | cmd = aliases[0] |
|
394 | 394 | args = aliasargs(entry[0], args) |
|
395 | 395 | defaults = ui.config("defaults", cmd) |
|
396 | 396 | if defaults: |
|
397 | 397 | args = map(util.expandpath, shlex.split(defaults)) + args |
|
398 | 398 | c = list(entry[1]) |
|
399 | 399 | else: |
|
400 | 400 | cmd = None |
|
401 | 401 | c = [] |
|
402 | 402 | |
|
403 | 403 | # combine global options into local |
|
404 | 404 | for o in commands.globalopts: |
|
405 | 405 | c.append((o[0], o[1], options[o[1]], o[3])) |
|
406 | 406 | |
|
407 | 407 | try: |
|
408 | 408 | args = fancyopts.fancyopts(args, c, cmdoptions, True) |
|
409 | 409 | except fancyopts.getopt.GetoptError, inst: |
|
410 | 410 | raise error.CommandError(cmd, inst) |
|
411 | 411 | |
|
412 | 412 | # separate global options back out |
|
413 | 413 | for o in commands.globalopts: |
|
414 | 414 | n = o[1] |
|
415 | 415 | options[n] = cmdoptions[n] |
|
416 | 416 | del cmdoptions[n] |
|
417 | 417 | |
|
418 | 418 | return (cmd, cmd and entry[0] or None, args, options, cmdoptions) |
|
419 | 419 | |
|
420 | 420 | def _parseconfig(ui, config): |
|
421 | 421 | """parse the --config options from the command line""" |
|
422 | 422 | configs = [] |
|
423 | 423 | |
|
424 | 424 | for cfg in config: |
|
425 | 425 | try: |
|
426 | 426 | name, value = cfg.split('=', 1) |
|
427 | 427 | section, name = name.split('.', 1) |
|
428 | 428 | if not section or not name: |
|
429 | 429 | raise IndexError |
|
430 | 430 | ui.setconfig(section, name, value) |
|
431 | 431 | configs.append((section, name, value)) |
|
432 | 432 | except (IndexError, ValueError): |
|
433 | 433 | raise util.Abort(_('malformed --config option: %r ' |
|
434 | 434 | '(use --config section.name=value)') % cfg) |
|
435 | 435 | |
|
436 | 436 | return configs |
|
437 | 437 | |
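
A usage sketch for _parseconfig above, with a minimal stand-in for the ui object (only setconfig is needed here):

    class fakeui(object):
        def setconfig(self, section, name, value):
            print 'set %s.%s=%s' % (section, name, value)

    print _parseconfig(fakeui(), ['ui.username=alice', 'web.port=8000'])
    # malformed entries such as 'uiusername=alice' raise util.Abort
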
|
438 | 438 | def _earlygetopt(aliases, args): |
|
439 | 439 | """Return list of values for an option (or aliases). |
|
440 | 440 | |
|
441 | 441 | The values are listed in the order they appear in args. |
|
442 | 442 | The options and values are removed from args. |
|
443 | 443 | """ |
|
444 | 444 | try: |
|
445 | 445 | argcount = args.index("--") |
|
446 | 446 | except ValueError: |
|
447 | 447 | argcount = len(args) |
|
448 | 448 | shortopts = [opt for opt in aliases if len(opt) == 2] |
|
449 | 449 | values = [] |
|
450 | 450 | pos = 0 |
|
451 | 451 | while pos < argcount: |
|
452 | 452 | if args[pos] in aliases: |
|
453 | 453 | if pos + 1 >= argcount: |
|
454 | 454 | # ignore and let getopt report an error if there is no value |
|
455 | 455 | break |
|
456 | 456 | del args[pos] |
|
457 | 457 | values.append(args.pop(pos)) |
|
458 | 458 | argcount -= 2 |
|
459 | 459 | elif args[pos][:2] in shortopts: |
|
460 | 460 | # short option can have no following space, e.g. hg log -Rfoo |
|
461 | 461 | values.append(args.pop(pos)[2:]) |
|
462 | 462 | argcount -= 1 |
|
463 | 463 | else: |
|
464 | 464 | pos += 1 |
|
465 | 465 | return values |
|
466 | 466 | |
|
467 | 467 | def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions): |
|
468 | 468 | # run pre-hook, and abort if it fails |
|
469 | 469 | ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs), |
|
470 | 470 | pats=cmdpats, opts=cmdoptions) |
|
471 | 471 | if ret: |
|
472 | 472 | return ret |
|
473 | 473 | ret = _runcommand(ui, options, cmd, d) |
|
474 | 474 | # run post-hook, passing command result |
|
475 | 475 | hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), |
|
476 | 476 | result=ret, pats=cmdpats, opts=cmdoptions) |
|
477 | 477 | return ret |
|
478 | 478 | |
|
479 | 479 | def _getlocal(ui, rpath): |
|
480 | 480 | """Return (path, local ui object) for the given target path. |
|
481 | 481 | |
|
482 | 482 | Takes paths in [cwd]/.hg/hgrc into account.
|
483 | 483 | """ |
|
484 | 484 | try: |
|
485 | 485 | wd = os.getcwd() |
|
486 | 486 | except OSError, e: |
|
487 | 487 | raise util.Abort(_("error getting current working directory: %s") % |
|
488 | 488 | e.strerror) |
|
489 | 489 | path = cmdutil.findrepo(wd) or "" |
|
490 | 490 | if not path: |
|
491 | 491 | lui = ui |
|
492 | 492 | else: |
|
493 | 493 | lui = ui.copy() |
|
494 | 494 | lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) |
|
495 | 495 | |
|
496 | 496 | if rpath and rpath[-1]: |
|
497 | 497 | path = lui.expandpath(rpath[-1]) |
|
498 | 498 | lui = ui.copy() |
|
499 | 499 | lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) |
|
500 | 500 | |
|
501 | 501 | return path, lui |
|
502 | 502 | |
|
503 | 503 | def _checkshellalias(lui, ui, args): |
|
504 | 504 | options = {} |
|
505 | 505 | |
|
506 | 506 | try: |
|
507 | 507 | args = fancyopts.fancyopts(args, commands.globalopts, options) |
|
508 | 508 | except fancyopts.getopt.GetoptError: |
|
509 | 509 | return |
|
510 | 510 | |
|
511 | 511 | if not args: |
|
512 | 512 | return |
|
513 | 513 | |
|
514 | 514 | norepo = commands.norepo |
|
515 | 515 | optionalrepo = commands.optionalrepo |
|
516 | 516 | def restorecommands(): |
|
517 | 517 | commands.norepo = norepo |
|
518 | 518 | commands.optionalrepo = optionalrepo |
|
519 | 519 | |
|
520 | 520 | cmdtable = commands.table.copy() |
|
521 | 521 | addaliases(lui, cmdtable) |
|
522 | 522 | |
|
523 | 523 | cmd = args[0] |
|
524 | 524 | try: |
|
525 | 525 | aliases, entry = cmdutil.findcmd(cmd, cmdtable, |
|
526 | 526 | lui.configbool("ui", "strict")) |
|
527 | 527 | except (error.AmbiguousCommand, error.UnknownCommand): |
|
528 | 528 | restorecommands() |
|
529 | 529 | return |
|
530 | 530 | |
|
531 | 531 | cmd = aliases[0] |
|
532 | 532 | fn = entry[0] |
|
533 | 533 | |
|
534 | 534 | if cmd and util.safehasattr(fn, 'shell'): |
|
535 | 535 | d = lambda: fn(ui, *args[1:]) |
|
536 | 536 | return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, |
|
537 | 537 | [], {}) |
|
538 | 538 | |
|
539 | 539 | restorecommands() |
|
540 | 540 | |
|
541 | 541 | _loaded = set() |
|
542 | 542 | def _dispatch(req): |
|
543 | 543 | args = req.args |
|
544 | 544 | ui = req.ui |
|
545 | 545 | |
|
546 | 546 | # read --config before doing anything else |
|
547 | 547 | # (e.g. to change trust settings for reading .hg/hgrc) |
|
548 | 548 | cfgs = _parseconfig(ui, _earlygetopt(['--config'], args)) |
|
549 | 549 | |
|
550 | 550 | # check for cwd |
|
551 | 551 | cwd = _earlygetopt(['--cwd'], args) |
|
552 | 552 | if cwd: |
|
553 | 553 | os.chdir(cwd[-1]) |
|
554 | 554 | |
|
555 | 555 | rpath = _earlygetopt(["-R", "--repository", "--repo"], args) |
|
556 | 556 | path, lui = _getlocal(ui, rpath) |
|
557 | 557 | |
|
558 | 558 | # Now that we're operating in the right directory/repository with |
|
559 | 559 | # the right config settings, check for shell aliases |
|
560 | 560 | shellaliasfn = _checkshellalias(lui, ui, args) |
|
561 | 561 | if shellaliasfn: |
|
562 | 562 | return shellaliasfn() |
|
563 | 563 | |
|
564 | 564 | # Configure extensions in phases: uisetup, extsetup, cmdtable, and |
|
565 | 565 | # reposetup. Programs like TortoiseHg will call _dispatch several |
|
566 | 566 | # times so we keep track of configured extensions in _loaded. |
|
567 | 567 | extensions.loadall(lui) |
|
568 | 568 | exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded] |
|
569 | 569 | # Propagate any changes to lui.__class__ by extensions |
|
570 | 570 | ui.__class__ = lui.__class__ |
|
571 | 571 | |
|
572 | 572 | # (uisetup and extsetup are handled in extensions.loadall) |
|
573 | 573 | |
|
574 | 574 | for name, module in exts: |
|
575 | 575 | cmdtable = getattr(module, 'cmdtable', {}) |
|
576 | 576 | overrides = [cmd for cmd in cmdtable if cmd in commands.table] |
|
577 | 577 | if overrides: |
|
578 | 578 | ui.warn(_("extension '%s' overrides commands: %s\n") |
|
579 | 579 | % (name, " ".join(overrides))) |
|
580 | 580 | commands.table.update(cmdtable) |
|
581 | 581 | _loaded.add(name) |
|
582 | 582 | |
|
583 | 583 | # (reposetup is handled in hg.repository) |
|
584 | 584 | |
|
585 | 585 | addaliases(lui, commands.table) |
|
586 | 586 | |
|
587 | 587 | # check for fallback encoding |
|
588 | 588 | fallback = lui.config('ui', 'fallbackencoding') |
|
589 | 589 | if fallback: |
|
590 | 590 | encoding.fallbackencoding = fallback |
|
591 | 591 | |
|
592 | 592 | fullargs = args |
|
593 | 593 | cmd, func, args, options, cmdoptions = _parse(lui, args) |
|
594 | 594 | |
|
595 | 595 | if options["config"]: |
|
596 | 596 | raise util.Abort(_("option --config may not be abbreviated!")) |
|
597 | 597 | if options["cwd"]: |
|
598 | 598 | raise util.Abort(_("option --cwd may not be abbreviated!")) |
|
599 | 599 | if options["repository"]: |
|
600 | 600 | raise util.Abort(_( |
|
601 | 601 | "option -R has to be separated from other options (e.g. not -qR) " |
|
602 | 602 | "and --repository may only be abbreviated as --repo!")) |
|
603 | 603 | |
|
604 | 604 | if options["encoding"]: |
|
605 | 605 | encoding.encoding = options["encoding"] |
|
606 | 606 | if options["encodingmode"]: |
|
607 | 607 | encoding.encodingmode = options["encodingmode"] |
|
608 | 608 | if options["time"]: |
|
609 | 609 | def get_times(): |
|
610 | 610 | t = os.times() |
|
611 | 611 | if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() |
|
612 | 612 | t = (t[0], t[1], t[2], t[3], time.clock()) |
|
613 | 613 | return t |
|
614 | 614 | s = get_times() |
|
615 | 615 | def print_time(): |
|
616 | 616 | t = get_times() |
|
617 | 617 | ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % |
|
618 | 618 | (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) |
|
619 | 619 | atexit.register(print_time) |
|
620 | 620 | |
|
621 | 621 | uis = set([ui, lui]) |
|
622 | 622 | |
|
623 | 623 | if req.repo: |
|
624 | 624 | uis.add(req.repo.ui) |
|
625 | 625 | |
|
626 | 626 | # copy configs that were passed on the cmdline (--config) to the repo ui |
|
627 | 627 | for cfg in cfgs: |
|
628 | 628 | req.repo.ui.setconfig(*cfg) |
|
629 | 629 | |
|
630 | 630 | if options['verbose'] or options['debug'] or options['quiet']: |
|
631 | 631 | for opt in ('verbose', 'debug', 'quiet'): |
|
632 | 632 | val = str(bool(options[opt])) |
|
633 | 633 | for ui_ in uis: |
|
634 | 634 | ui_.setconfig('ui', opt, val) |
|
635 | 635 | |
|
636 | 636 | if options['traceback']: |
|
637 | 637 | for ui_ in uis: |
|
638 | 638 | ui_.setconfig('ui', 'traceback', 'on') |
|
639 | 639 | |
|
640 | 640 | if options['noninteractive']: |
|
641 | 641 | for ui_ in uis: |
|
642 | 642 | ui_.setconfig('ui', 'interactive', 'off') |
|
643 | 643 | |
|
644 | 644 | if cmdoptions.get('insecure', False): |
|
645 | 645 | for ui_ in uis: |
|
646 | 646 | ui_.setconfig('web', 'cacerts', '') |
|
647 | 647 | |
|
648 | 648 | if options['version']: |
|
649 | 649 | return commands.version_(ui) |
|
650 | 650 | if options['help']: |
|
651 | 651 | return commands.help_(ui, cmd) |
|
652 | 652 | elif not cmd: |
|
653 | 653 | return commands.help_(ui, 'shortlist') |
|
654 | 654 | |
|
655 | 655 | repo = None |
|
656 | 656 | cmdpats = args[:] |
|
657 | 657 | if cmd not in commands.norepo.split(): |
|
658 | 658 | # use the repo from the request only if we don't have -R |
|
659 | 659 | if not rpath and not cwd: |
|
660 | 660 | repo = req.repo |
|
661 | 661 | |
|
662 | 662 | if repo: |
|
663 | 663 | # set the descriptors of the repo ui to those of ui |
|
664 | 664 | repo.ui.fin = ui.fin |
|
665 | 665 | repo.ui.fout = ui.fout |
|
666 | 666 | repo.ui.ferr = ui.ferr |
|
667 | 667 | else: |
|
668 | 668 | try: |
|
669 | 669 | repo = hg.repository(ui, path=path) |
|
670 | 670 | if not repo.local(): |
|
671 | 671 | raise util.Abort(_("repository '%s' is not local") % path) |
|
672 | 672 | repo.ui.setconfig("bundle", "mainreporoot", repo.root) |
|
673 | 673 | except error.RequirementError: |
|
674 | 674 | raise |
|
675 | 675 | except error.RepoError: |
|
676 | 676 | if cmd not in commands.optionalrepo.split(): |
|
677 | 677 | if args and not path: # try to infer -R from command args |
|
678 | 678 | repos = map(cmdutil.findrepo, args) |
|
679 | 679 | guess = repos[0] |
|
680 | 680 | if guess and repos.count(guess) == len(repos): |
|
681 | 681 | req.args = ['--repository', guess] + fullargs |
|
682 | 682 | return _dispatch(req) |
|
683 | 683 | if not path: |
|
684 | 684 | raise error.RepoError(_("no repository found in '%s'" |
|
685 | 685 | " (.hg not found)") |
|
686 | 686 | % os.getcwd()) |
|
687 | 687 | raise |
|
688 | 688 | if repo: |
|
689 | 689 | ui = repo.ui |
|
690 | 690 | args.insert(0, repo) |
|
691 | 691 | elif rpath: |
|
692 | 692 | ui.warn(_("warning: --repository ignored\n")) |
|
693 | 693 | |
|
694 | 694 | msg = ' '.join(' ' in a and repr(a) or a for a in fullargs) |
|
695 | 695 | ui.log("command", msg + "\n") |
|
696 | 696 | d = lambda: util.checksignature(func)(ui, *args, **cmdoptions) |
|
697 | 697 | try: |
|
698 | 698 | return runcommand(lui, repo, cmd, fullargs, ui, options, d, |
|
699 | 699 | cmdpats, cmdoptions) |
|
700 | 700 | finally: |
|
701 | 701 | if repo and repo != req.repo: |
|
702 | 702 | repo.close() |
|
703 | 703 | |
|
704 | 704 | def lsprofile(ui, func, fp): |
|
705 | 705 | format = ui.config('profiling', 'format', default='text') |
|
706 | 706 | field = ui.config('profiling', 'sort', default='inlinetime') |
|
707 | 707 | climit = ui.configint('profiling', 'nested', default=5) |
|
708 | 708 | |
|
709 |     | if not format in ['text', 'kcachegrind']:

    | 709 | if format not in ['text', 'kcachegrind']:
|
710 | 710 | ui.warn(_("unrecognized profiling format '%s'" |
|
711 | 711 | " - Ignored\n") % format) |
|
712 | 712 | format = 'text' |
|
713 | 713 | |
|
714 | 714 | try: |
|
715 | 715 | from mercurial import lsprof |
|
716 | 716 | except ImportError: |
|
717 | 717 | raise util.Abort(_( |
|
718 | 718 | 'lsprof not available - install from ' |
|
719 | 719 | 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/')) |
|
720 | 720 | p = lsprof.Profiler() |
|
721 | 721 | p.enable(subcalls=True) |
|
722 | 722 | try: |
|
723 | 723 | return func() |
|
724 | 724 | finally: |
|
725 | 725 | p.disable() |
|
726 | 726 | |
|
727 | 727 | if format == 'kcachegrind': |
|
728 | 728 | import lsprofcalltree |
|
729 | 729 | calltree = lsprofcalltree.KCacheGrind(p) |
|
730 | 730 | calltree.output(fp) |
|
731 | 731 | else: |
|
732 | 732 | # format == 'text' |
|
733 | 733 | stats = lsprof.Stats(p.getstats()) |
|
734 | 734 | stats.sort(field) |
|
735 | 735 | stats.pprint(limit=30, file=fp, climit=climit) |
|
736 | 736 | |
|
737 | 737 | def statprofile(ui, func, fp): |
|
738 | 738 | try: |
|
739 | 739 | import statprof |
|
740 | 740 | except ImportError: |
|
741 | 741 | raise util.Abort(_( |
|
742 | 742 | 'statprof not available - install using "easy_install statprof"')) |
|
743 | 743 | |
|
744 | 744 | freq = ui.configint('profiling', 'freq', default=1000) |
|
745 | 745 | if freq > 0: |
|
746 | 746 | statprof.reset(freq) |
|
747 | 747 | else: |
|
748 | 748 | ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq) |
|
749 | 749 | |
|
750 | 750 | statprof.start() |
|
751 | 751 | try: |
|
752 | 752 | return func() |
|
753 | 753 | finally: |
|
754 | 754 | statprof.stop() |
|
755 | 755 | statprof.display(fp) |
|
756 | 756 | |
|
757 | 757 | def _runcommand(ui, options, cmd, cmdfunc): |
|
758 | 758 | def checkargs(): |
|
759 | 759 | try: |
|
760 | 760 | return cmdfunc() |
|
761 | 761 | except error.SignatureError: |
|
762 | 762 | raise error.CommandError(cmd, _("invalid arguments")) |
|
763 | 763 | |
|
764 | 764 | if options['profile']: |
|
765 | 765 | profiler = os.getenv('HGPROF') |
|
766 | 766 | if profiler is None: |
|
767 | 767 | profiler = ui.config('profiling', 'type', default='ls') |
|
768 | 768 | if profiler not in ('ls', 'stat'): |
|
769 | 769 | ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler) |
|
770 | 770 | profiler = 'ls' |
|
771 | 771 | |
|
772 | 772 | output = ui.config('profiling', 'output') |
|
773 | 773 | |
|
774 | 774 | if output: |
|
775 | 775 | path = ui.expandpath(output) |
|
776 | 776 | fp = open(path, 'wb') |
|
777 | 777 | else: |
|
778 | 778 | fp = sys.stderr |
|
779 | 779 | |
|
780 | 780 | try: |
|
781 | 781 | if profiler == 'ls': |
|
782 | 782 | return lsprofile(ui, checkargs, fp) |
|
783 | 783 | else: |
|
784 | 784 | return statprofile(ui, checkargs, fp) |
|
785 | 785 | finally: |
|
786 | 786 | if output: |
|
787 | 787 | fp.close() |
|
788 | 788 | else: |
|
789 | 789 | return checkargs() |
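
For reference, the profiling knobs read above (type and output in _runcommand, format/sort/nested in lsprofile, freq in statprofile) correspond to hgrc entries like the following; the defaults are taken from the code, and the output path is illustrative:

    [profiling]
    # 'ls' (lsprof) or 'stat' (statprof); the HGPROF environment
    # variable overrides this setting
    type = ls
    # 'text' or 'kcachegrind'
    format = text
    sort = inlinetime
    nested = 5
    # statprof sampling frequency
    freq = 1000
    # where to write the profile; omitted means stderr
    output = ~/hg.prof
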
@@ -1,764 +1,764 b'' | |||
|
1 | 1 | # This library is free software; you can redistribute it and/or |
|
2 | 2 | # modify it under the terms of the GNU Lesser General Public |
|
3 | 3 | # License as published by the Free Software Foundation; either |
|
4 | 4 | # version 2.1 of the License, or (at your option) any later version. |
|
5 | 5 | # |
|
6 | 6 | # This library is distributed in the hope that it will be useful, |
|
7 | 7 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | 8 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
9 | 9 | # Lesser General Public License for more details. |
|
10 | 10 | # |
|
11 | 11 | # You should have received a copy of the GNU Lesser General Public |
|
12 | 12 | # License along with this library; if not, see |
|
13 | 13 | # <http://www.gnu.org/licenses/>. |
|
14 | 14 | |
|
15 | 15 | # This file is part of urlgrabber, a high-level cross-protocol url-grabber |
|
16 | 16 | # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko |
|
17 | 17 | |
|
18 | 18 | # Modified by Benoit Boissinot: |
|
19 | 19 | # - fix for digest auth (inspired from urllib2.py @ Python v2.4) |
|
20 | 20 | # Modified by Dirkjan Ochtman: |
|
21 | 21 | # - import md5 function from a local util module |
|
22 | 22 | # Modified by Martin Geisler: |
|
23 | 23 | # - moved md5 function from local util module to this module |
|
24 | 24 | # Modified by Augie Fackler: |
|
25 | 25 | # - add safesend method and use it to prevent broken pipe errors |
|
26 | 26 | # on large POST requests |
|
27 | 27 | |
|
28 | 28 | """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive. |
|
29 | 29 | |
|
30 | 30 | >>> import urllib2 |
|
31 | 31 | >>> from keepalive import HTTPHandler |
|
32 | 32 | >>> keepalive_handler = HTTPHandler() |
|
33 | 33 | >>> opener = urllib2.build_opener(keepalive_handler) |
|
34 | 34 | >>> urllib2.install_opener(opener) |
|
35 | 35 | >>> |
|
36 | 36 | >>> fo = urllib2.urlopen('http://www.python.org') |
|
37 | 37 | |
|
38 | 38 | If a connection to a given host is requested, and all of the existing |
|
39 | 39 | connections are still in use, another connection will be opened. If |
|
40 | 40 | the handler tries to use an existing connection but it fails in some |
|
41 | 41 | way, it will be closed and removed from the pool. |
|
42 | 42 | |
|
43 | 43 | To remove the handler, simply re-run build_opener with no arguments, and |
|
44 | 44 | install that opener. |
|
45 | 45 | |
|
46 | 46 | You can explicitly close connections by using the close_connection() |
|
47 | 47 | method of the returned file-like object (described below) or you can |
|
48 | 48 | use the handler methods: |
|
49 | 49 | |
|
50 | 50 | close_connection(host) |
|
51 | 51 | close_all() |
|
52 | 52 | open_connections() |
|
53 | 53 | |
|
54 | 54 | NOTE: using the close_connection and close_all methods of the handler |
|
55 | 55 | should be done with care when using multiple threads. |
|
56 | 56 | * there is nothing that prevents another thread from creating new |
|
57 | 57 | connections immediately after connections are closed |
|
58 | 58 | * no checks are done to prevent in-use connections from being closed |
|
59 | 59 | |
|
60 | 60 | >>> keepalive_handler.close_all() |
|
61 | 61 | |
|
62 | 62 | EXTRA ATTRIBUTES AND METHODS |
|
63 | 63 | |
|
64 | 64 | Upon a status of 200, the object returned has a few additional |
|
65 | 65 | attributes and methods, which should not be used if you want to |
|
66 | 66 | remain consistent with the normal urllib2-returned objects: |
|
67 | 67 | |
|
68 | 68 | close_connection() - close the connection to the host |
|
69 | 69 | readlines() - you know, readlines() |
|
70 | 70 | status - the return status (ie 404) |
|
71 | 71 | reason - English translation of status (ie 'File not found')
|
72 | 72 | |
|
73 | 73 | If you want the best of both worlds, use this inside an |
|
74 | 74 | AttributeError-catching try: |
|
75 | 75 | |
|
76 | 76 | >>> try: status = fo.status |
|
77 | 77 | >>> except AttributeError: status = None |
|
78 | 78 | |
|
79 | 79 | Unfortunately, these are ONLY there if status == 200, so it's not |
|
80 | 80 | easy to distinguish between non-200 responses. The reason is that |
|
81 | 81 | urllib2 tries to do clever things with error codes 301, 302, 401, |
|
82 | 82 | and 407, and it wraps the object upon return. |
|
83 | 83 | |
|
84 | 84 | For python versions earlier than 2.4, you can avoid this fancy error |
|
85 | 85 | handling by setting the module-level global HANDLE_ERRORS to zero. |
|
86 | 86 | You see, prior to 2.4, it's the HTTP Handler's job to determine what |
|
87 | 87 | to handle specially, and what to just pass up. HANDLE_ERRORS == 0 |
|
88 | 88 | means "pass everything up". In python 2.4, however, this job no |
|
89 | 89 | longer belongs to the HTTP Handler and is now done by a NEW handler, |
|
90 | 90 | HTTPErrorProcessor. Here's the bottom line: |
|
91 | 91 | |
|
92 | 92 | python version < 2.4 |
|
93 | 93 | HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as |
|
94 | 94 | errors |
|
95 | 95 | HANDLE_ERRORS == 0 pass everything up, error processing is |
|
96 | 96 | left to the calling code |
|
97 | 97 | python version >= 2.4 |
|
98 | 98 | HANDLE_ERRORS == 1 pass up 200, treat the rest as errors |
|
99 | 99 | HANDLE_ERRORS == 0 (default) pass everything up, let the |
|
100 | 100 | other handlers (specifically, |
|
101 | 101 | HTTPErrorProcessor) decide what to do |
|
102 | 102 | |
|
103 | 103 | In practice, setting the variable either way makes little difference |
|
104 | 104 | in python 2.4, so for the most consistent behavior across versions, |
|
105 | 105 | you probably just want to use the defaults, which will give you |
|
106 | 106 | exceptions on errors. |
|
107 | 107 | |
|
108 | 108 | """ |
|
109 | 109 | |
|
110 | 110 | # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $ |
|
111 | 111 | |
|
112 | 112 | import errno |
|
113 | 113 | import httplib |
|
114 | 114 | import socket |
|
115 | 115 | import thread |
|
116 | 116 | import urllib2 |
|
117 | 117 | |
|
118 | 118 | DEBUG = None |
|
119 | 119 | |
|
120 | 120 | import sys |
|
121 | 121 | if sys.version_info < (2, 4): |
|
122 | 122 | HANDLE_ERRORS = 1 |
|
123 | 123 | else: HANDLE_ERRORS = 0 |
|
124 | 124 | |
|
125 | 125 | class ConnectionManager(object): |
|
126 | 126 | """ |
|
127 | 127 | The connection manager must be able to: |
|
128 | 128 | * keep track of all existing connections
|
129 | 129 | """ |
|
130 | 130 | def __init__(self): |
|
131 | 131 | self._lock = thread.allocate_lock() |
|
132 | 132 | self._hostmap = {} # map hosts to a list of connections |
|
133 | 133 | self._connmap = {} # map connections to host |
|
134 | 134 | self._readymap = {} # map connection to ready state |
|
135 | 135 | |
|
136 | 136 | def add(self, host, connection, ready): |
|
137 | 137 | self._lock.acquire() |
|
138 | 138 | try: |
|
139 |     | if not host in self._hostmap:

    | 139 | if host not in self._hostmap:
|
140 | 140 | self._hostmap[host] = [] |
|
141 | 141 | self._hostmap[host].append(connection) |
|
142 | 142 | self._connmap[connection] = host |
|
143 | 143 | self._readymap[connection] = ready |
|
144 | 144 | finally: |
|
145 | 145 | self._lock.release() |
|
146 | 146 | |
|
147 | 147 | def remove(self, connection): |
|
148 | 148 | self._lock.acquire() |
|
149 | 149 | try: |
|
150 | 150 | try: |
|
151 | 151 | host = self._connmap[connection] |
|
152 | 152 | except KeyError: |
|
153 | 153 | pass |
|
154 | 154 | else: |
|
155 | 155 | del self._connmap[connection] |
|
156 | 156 | del self._readymap[connection] |
|
157 | 157 | self._hostmap[host].remove(connection) |
|
158 | 158 | if not self._hostmap[host]: del self._hostmap[host] |
|
159 | 159 | finally: |
|
160 | 160 | self._lock.release() |
|
161 | 161 | |
|
162 | 162 | def set_ready(self, connection, ready): |
|
163 | 163 | try: |
|
164 | 164 | self._readymap[connection] = ready |
|
165 | 165 | except KeyError: |
|
166 | 166 | pass |
|
167 | 167 | |
|
168 | 168 | def get_ready_conn(self, host): |
|
169 | 169 | conn = None |
|
170 | 170 | self._lock.acquire() |
|
171 | 171 | try: |
|
172 | 172 | if host in self._hostmap: |
|
173 | 173 | for c in self._hostmap[host]: |
|
174 | 174 | if self._readymap[c]: |
|
175 | 175 | self._readymap[c] = 0 |
|
176 | 176 | conn = c |
|
177 | 177 | break |
|
178 | 178 | finally: |
|
179 | 179 | self._lock.release() |
|
180 | 180 | return conn |
|
181 | 181 | |
|
182 | 182 | def get_all(self, host=None): |
|
183 | 183 | if host: |
|
184 | 184 | return list(self._hostmap.get(host, [])) |
|
185 | 185 | else: |
|
186 | 186 | return dict(self._hostmap) |
|
187 | 187 | |
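In outline, the bookkeeping the class above provides looks like this
(conn stands for any connection object; the host string is illustrative):

    cm = ConnectionManager()
    cm.add('example.com:80', conn, 0)        # busy: a request is in flight
    cm.set_ready(conn, 1)                    # request finished, reusable
    c = cm.get_ready_conn('example.com:80')  # returns conn, marks it busy
    cm.remove(conn)                          # drop it from all three maps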
|
188 | 188 | class KeepAliveHandler(object): |
|
189 | 189 | def __init__(self): |
|
190 | 190 | self._cm = ConnectionManager() |
|
191 | 191 | |
|
192 | 192 | #### Connection Management |
|
193 | 193 | def open_connections(self): |
|
194 | 194 | """return a list of connected hosts and the number of connections |
|
195 | 195 | to each. [('foo.com:80', 2), ('bar.org', 1)]""" |
|
196 | 196 | return [(host, len(li)) for (host, li) in self._cm.get_all().items()] |
|
197 | 197 | |
|
198 | 198 | def close_connection(self, host): |
|
199 | 199 | """close connection(s) to <host> |
|
200 | 200 | host is the host:port spec, e.g. 'www.cnn.com:8080', as passed in.
|
201 | 201 | no error occurs if there is no connection to that host.""" |
|
202 | 202 | for h in self._cm.get_all(host): |
|
203 | 203 | self._cm.remove(h) |
|
204 | 204 | h.close() |
|
205 | 205 | |
|
206 | 206 | def close_all(self): |
|
207 | 207 | """close all open connections""" |
|
208 | 208 | for host, conns in self._cm.get_all().iteritems(): |
|
209 | 209 | for h in conns: |
|
210 | 210 | self._cm.remove(h) |
|
211 | 211 | h.close() |
|
212 | 212 | |
|
213 | 213 | def _request_closed(self, request, host, connection): |
|
214 | 214 | """tells us that this request is now closed and the the |
|
215 | 215 | connection is ready for another request""" |
|
216 | 216 | self._cm.set_ready(connection, 1) |
|
217 | 217 | |
|
218 | 218 | def _remove_connection(self, host, connection, close=0): |
|
219 | 219 | if close: |
|
220 | 220 | connection.close() |
|
221 | 221 | self._cm.remove(connection) |
|
222 | 222 | |
|
223 | 223 | #### Transaction Execution |
|
224 | 224 | def http_open(self, req): |
|
225 | 225 | return self.do_open(HTTPConnection, req) |
|
226 | 226 | |
|
227 | 227 | def do_open(self, http_class, req): |
|
228 | 228 | host = req.get_host() |
|
229 | 229 | if not host: |
|
230 | 230 | raise urllib2.URLError('no host given') |
|
231 | 231 | |
|
232 | 232 | try: |
|
233 | 233 | h = self._cm.get_ready_conn(host) |
|
234 | 234 | while h: |
|
235 | 235 | r = self._reuse_connection(h, req, host) |
|
236 | 236 | |
|
237 | 237 | # if this response is non-None, then it worked and we're |
|
238 | 238 | # done. Break out, skipping the else block. |
|
239 | 239 | if r: |
|
240 | 240 | break |
|
241 | 241 | |
|
242 | 242 | # connection is bad - possibly closed by server |
|
243 | 243 | # discard it and ask for the next free connection |
|
244 | 244 | h.close() |
|
245 | 245 | self._cm.remove(h) |
|
246 | 246 | h = self._cm.get_ready_conn(host) |
|
247 | 247 | else: |
|
248 | 248 | # no (working) free connections were found. Create a new one. |
|
249 | 249 | h = http_class(host) |
|
250 | 250 | if DEBUG: |
|
251 | 251 | DEBUG.info("creating new connection to %s (%d)", |
|
252 | 252 | host, id(h)) |
|
253 | 253 | self._cm.add(host, h, 0) |
|
254 | 254 | self._start_transaction(h, req) |
|
255 | 255 | r = h.getresponse() |
|
256 | 256 | except (socket.error, httplib.HTTPException), err: |
|
257 | 257 | raise urllib2.URLError(err) |
|
258 | 258 | |
|
259 | 259 | # if not a persistent connection, don't try to reuse it |
|
260 | 260 | if r.will_close: |
|
261 | 261 | self._cm.remove(h) |
|
262 | 262 | |
|
263 | 263 | if DEBUG: |
|
264 | 264 | DEBUG.info("STATUS: %s, %s", r.status, r.reason) |
|
265 | 265 | r._handler = self |
|
266 | 266 | r._host = host |
|
267 | 267 | r._url = req.get_full_url() |
|
268 | 268 | r._connection = h |
|
269 | 269 | r.code = r.status |
|
270 | 270 | r.headers = r.msg |
|
271 | 271 | r.msg = r.reason |
|
272 | 272 | |
|
273 | 273 | if r.status == 200 or not HANDLE_ERRORS: |
|
274 | 274 | return r |
|
275 | 275 | else: |
|
276 | 276 | return self.parent.error('http', req, r, |
|
277 | 277 | r.status, r.msg, r.headers) |
|
278 | 278 | |
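The loop above leans on python's while/else: the else clause runs only
when the loop finishes without hitting break. A stripped-down illustration
(get_candidate, try_reuse and make_new are stand-in names):

    h = get_candidate()
    while h:
        if try_reuse(h):
            break           # success; the else clause is skipped
        h = get_candidate()
    else:
        h = make_new()      # runs only when no candidate was usable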
|
279 | 279 | def _reuse_connection(self, h, req, host): |
|
280 | 280 | """start the transaction with a re-used connection |
|
281 | 281 | return a response object (r) upon success or None on failure. |
|
282 | 282 | This DOES not close or remove bad connections in cases where |
|
283 | 283 | it returns. However, if an unexpected exception occurs, it |
|
284 | 284 | will close and remove the connection before re-raising. |
|
285 | 285 | """ |
|
286 | 286 | try: |
|
287 | 287 | self._start_transaction(h, req) |
|
288 | 288 | r = h.getresponse() |
|
289 | 289 | # note: just because we got something back doesn't mean it |
|
290 | 290 | # worked. We'll check the version below, too. |
|
291 | 291 | except (socket.error, httplib.HTTPException): |
|
292 | 292 | r = None |
|
293 | 293 | except: |
|
294 | 294 | # adding this block just in case we've missed |
|
295 | 295 | # something: we will still raise the exception, but

296 | 296 | # let's try to close the connection and remove it
|
297 | 297 | # first. We previously got into a nasty loop |
|
298 | 298 | # where an exception was uncaught, and so the |
|
299 | 299 | # connection stayed open. On the next try, the |
|
300 | 300 | # same exception was raised, etc. The tradeoff is |
|
301 | 301 | # that it's now possible this call will raise |
|
302 | 302 | # a DIFFERENT exception |
|
303 | 303 | if DEBUG: |
|
304 | 304 | DEBUG.error("unexpected exception - closing " |
|
305 | 305 | "connection to %s (%d)", host, id(h)) |
|
306 | 306 | self._cm.remove(h) |
|
307 | 307 | h.close() |
|
308 | 308 | raise |
|
309 | 309 | |
|
310 | 310 | if r is None or r.version == 9: |
|
311 | 311 | # httplib falls back to assuming HTTP 0.9 if it gets a |
|
312 | 312 | # bad header back. This is most likely to happen if |
|
313 | 313 | # the socket has been closed by the server since we |
|
314 | 314 | # last used the connection. |
|
315 | 315 | if DEBUG: |
|
316 | 316 | DEBUG.info("failed to re-use connection to %s (%d)", |
|
317 | 317 | host, id(h)) |
|
318 | 318 | r = None |
|
319 | 319 | else: |
|
320 | 320 | if DEBUG: |
|
321 | 321 | DEBUG.info("re-using connection to %s (%d)", host, id(h)) |
|
322 | 322 | |
|
323 | 323 | return r |
|
324 | 324 | |
|
325 | 325 | def _start_transaction(self, h, req): |
|
326 | 326 | # What follows mostly reimplements HTTPConnection.request() |
|
327 | 327 | # except it adds self.parent.addheaders in the mix. |
|
328 | 328 | headers = req.headers.copy() |
|
329 | 329 | if sys.version_info >= (2, 4): |
|
330 | 330 | headers.update(req.unredirected_hdrs) |
|
331 | 331 | headers.update(self.parent.addheaders) |
|
332 | 332 | headers = dict((n.lower(), v) for n, v in headers.items()) |
|
333 | 333 | skipheaders = {} |
|
334 | 334 | for n in ('host', 'accept-encoding'): |
|
335 | 335 | if n in headers: |
|
336 | 336 | skipheaders['skip_' + n.replace('-', '_')] = 1 |
|
337 | 337 | try: |
|
338 | 338 | if req.has_data(): |
|
339 | 339 | data = req.get_data() |
|
340 | 340 | h.putrequest('POST', req.get_selector(), **skipheaders) |
|
341 | 341 | if 'content-type' not in headers: |
|
342 | 342 | h.putheader('Content-type', |
|
343 | 343 | 'application/x-www-form-urlencoded') |
|
344 | 344 | if 'content-length' not in headers: |
|
345 | 345 | h.putheader('Content-length', '%d' % len(data)) |
|
346 | 346 | else: |
|
347 | 347 | h.putrequest('GET', req.get_selector(), **skipheaders) |
|
348 | 348 | except socket.error, err:
|
349 | 349 | raise urllib2.URLError(err) |
|
350 | 350 | for k, v in headers.items(): |
|
351 | 351 | h.putheader(k, v) |
|
352 | 352 | h.endheaders() |
|
353 | 353 | if req.has_data(): |
|
354 | 354 | h.send(data) |
|
355 | 355 | |
|
356 | 356 | class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler): |
|
357 | 357 | pass |
|
358 | 358 | |
|
359 | 359 | class HTTPResponse(httplib.HTTPResponse): |
|
360 | 360 | # we need to subclass HTTPResponse in order to |
|
361 | 361 | # 1) add readline() and readlines() methods |
|
362 | 362 | # 2) add close_connection() methods |
|
363 | 363 | # 3) add info() and geturl() methods |
|
364 | 364 | |
|
365 | 365 | # in order to add readline(), read must be modified to deal with a |
|
366 | 366 | # buffer. example: readline must read a buffer and then spit back |
|
367 | 367 | # one line at a time. The only real alternative is to read one |
|
368 | 368 | # BYTE at a time (ick). Once something has been read, it can't be |
|
369 | 369 | # put back (ok, maybe it can, but that's even uglier than this), |
|
370 | 370 | # so if you THEN do a normal read, you must first take stuff from |
|
371 | 371 | # the buffer. |
|
372 | 372 | |
|
373 | 373 | # the read method wraps the original to accommodate buffering,
|
374 | 374 | # although read() never adds to the buffer. |
|
375 | 375 | # Both readline and readlines have been stolen with almost no |
|
376 | 376 | # modification from socket.py |
|
377 | 377 | |
|
378 | 378 | |
|
379 | 379 | def __init__(self, sock, debuglevel=0, strict=0, method=None): |
|
380 | 380 | if method: # the httplib in python 2.3 uses the method arg |
|
381 | 381 | httplib.HTTPResponse.__init__(self, sock, debuglevel, method) |
|
382 | 382 | else: # 2.2 doesn't |
|
383 | 383 | httplib.HTTPResponse.__init__(self, sock, debuglevel) |
|
384 | 384 | self.fileno = sock.fileno |
|
385 | 385 | self.code = None |
|
386 | 386 | self._rbuf = '' |
|
387 | 387 | self._rbufsize = 8096 |
|
388 | 388 | self._handler = None # inserted by the handler later |
|
389 | 389 | self._host = None # (same) |
|
390 | 390 | self._url = None # (same) |
|
391 | 391 | self._connection = None # (same) |
|
392 | 392 | |
|
393 | 393 | _raw_read = httplib.HTTPResponse.read |
|
394 | 394 | |
|
395 | 395 | def close(self): |
|
396 | 396 | if self.fp: |
|
397 | 397 | self.fp.close() |
|
398 | 398 | self.fp = None |
|
399 | 399 | if self._handler: |
|
400 | 400 | self._handler._request_closed(self, self._host, |
|
401 | 401 | self._connection) |
|
402 | 402 | |
|
403 | 403 | def close_connection(self): |
|
404 | 404 | self._handler._remove_connection(self._host, self._connection, close=1) |
|
405 | 405 | self.close() |
|
406 | 406 | |
|
407 | 407 | def info(self): |
|
408 | 408 | return self.headers |
|
409 | 409 | |
|
410 | 410 | def geturl(self): |
|
411 | 411 | return self._url |
|
412 | 412 | |
|
413 | 413 | def read(self, amt=None): |
|
414 | 414 | # the _rbuf test is only in this first if for speed. It's not |
|
415 | 415 | # logically necessary |
|
416 | 416 | if self._rbuf and amt is not None:
|
417 | 417 | L = len(self._rbuf) |
|
418 | 418 | if amt > L: |
|
419 | 419 | amt -= L |
|
420 | 420 | else: |
|
421 | 421 | s = self._rbuf[:amt] |
|
422 | 422 | self._rbuf = self._rbuf[amt:] |
|
423 | 423 | return s |
|
424 | 424 | |
|
425 | 425 | s = self._rbuf + self._raw_read(amt) |
|
426 | 426 | self._rbuf = '' |
|
427 | 427 | return s |
|
428 | 428 | |
|
429 | 429 | # stolen from Python SVN #68532 to fix issue1088 |
|
430 | 430 | def _read_chunked(self, amt): |
|
431 | 431 | chunk_left = self.chunk_left |
|
432 | 432 | value = '' |
|
433 | 433 | |
|
434 | 434 | # XXX This accumulates chunks by repeated string concatenation, |
|
435 | 435 | # which is not efficient as the number or size of chunks gets big. |
|
436 | 436 | while True: |
|
437 | 437 | if chunk_left is None: |
|
438 | 438 | line = self.fp.readline() |
|
439 | 439 | i = line.find(';') |
|
440 | 440 | if i >= 0: |
|
441 | 441 | line = line[:i] # strip chunk-extensions |
|
442 | 442 | try: |
|
443 | 443 | chunk_left = int(line, 16) |
|
444 | 444 | except ValueError: |
|
445 | 445 | # close the connection as protocol synchronisation is |
|
446 | 446 | # probably lost |
|
447 | 447 | self.close() |
|
448 | 448 | raise httplib.IncompleteRead(value) |
|
449 | 449 | if chunk_left == 0: |
|
450 | 450 | break |
|
451 | 451 | if amt is None: |
|
452 | 452 | value += self._safe_read(chunk_left) |
|
453 | 453 | elif amt < chunk_left: |
|
454 | 454 | value += self._safe_read(amt) |
|
455 | 455 | self.chunk_left = chunk_left - amt |
|
456 | 456 | return value |
|
457 | 457 | elif amt == chunk_left: |
|
458 | 458 | value += self._safe_read(amt) |
|
459 | 459 | self._safe_read(2) # toss the CRLF at the end of the chunk |
|
460 | 460 | self.chunk_left = None |
|
461 | 461 | return value |
|
462 | 462 | else: |
|
463 | 463 | value += self._safe_read(chunk_left) |
|
464 | 464 | amt -= chunk_left |
|
465 | 465 | |
|
466 | 466 | # we read the whole chunk, get another |
|
467 | 467 | self._safe_read(2) # toss the CRLF at the end of the chunk |
|
468 | 468 | chunk_left = None |
|
469 | 469 | |
|
470 | 470 | # read and discard trailer up to the CRLF terminator |
|
471 | 471 | ### note: we shouldn't have any trailers! |
|
472 | 472 | while True: |
|
473 | 473 | line = self.fp.readline() |
|
474 | 474 | if not line: |
|
475 | 475 | # a vanishingly small number of sites EOF without |
|
476 | 476 | # sending the trailer |
|
477 | 477 | break |
|
478 | 478 | if line == '\r\n': |
|
479 | 479 | break |
|
480 | 480 | |
|
481 | 481 | # we read everything; close the "file" |
|
482 | 482 | self.close() |
|
483 | 483 | |
|
484 | 484 | return value |
|
485 | 485 | |
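For reference, the chunked transfer-coding decoded above interleaves
hexadecimal sizes and data, terminated by a zero-size chunk:

    4\r\n
    Wiki\r\n
    5\r\n
    pedia\r\n
    0\r\n
    \r\n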
|
486 | 486 | def readline(self, limit=-1): |
|
487 | 487 | i = self._rbuf.find('\n') |
|
488 | 488 | while i < 0 and not (0 < limit <= len(self._rbuf)): |
|
489 | 489 | new = self._raw_read(self._rbufsize) |
|
490 | 490 | if not new: |
|
491 | 491 | break |
|
492 | 492 | i = new.find('\n') |
|
493 | 493 | if i >= 0: |
|
494 | 494 | i = i + len(self._rbuf) |
|
495 | 495 | self._rbuf = self._rbuf + new |
|
496 | 496 | if i < 0: |
|
497 | 497 | i = len(self._rbuf) |
|
498 | 498 | else: |
|
499 | 499 | i = i + 1 |
|
500 | 500 | if 0 <= limit < len(self._rbuf): |
|
501 | 501 | i = limit |
|
502 | 502 | data, self._rbuf = self._rbuf[:i], self._rbuf[i:] |
|
503 | 503 | return data |
|
504 | 504 | |
|
505 | 505 | def readlines(self, sizehint = 0): |
|
506 | 506 | total = 0 |
|
507 | 507 | list = [] |
|
508 | 508 | while True: |
|
509 | 509 | line = self.readline() |
|
510 | 510 | if not line: |
|
511 | 511 | break |
|
512 | 512 | list.append(line) |
|
513 | 513 | total += len(line) |
|
514 | 514 | if sizehint and total >= sizehint: |
|
515 | 515 | break |
|
516 | 516 | return list |
|
517 | 517 | |
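With the buffered methods in place, a response can be consumed line by
line like an ordinary file (url and process are stand-ins):

    fo = urllib2.urlopen(url)
    for line in fo.readlines():
        process(line)
    fo.close()    # close() also returns the connection to the pool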
|
518 | 518 | def safesend(self, str): |
|
519 | 519 | """Send `str' to the server. |
|
520 | 520 | |
|
521 | 521 | Shamelessly ripped off from httplib to patch a bad behavior. |
|
522 | 522 | """ |
|
523 | 523 | # _broken_pipe_resp is an attribute we set in this function |
|
524 | 524 | # if the socket is closed while we're sending data but |
|
525 | 525 | # the server sent us a response before hanging up. |
|
526 | 526 | # In that case, we want to pretend to send the rest of the |
|
527 | 527 | # outgoing data, and then let the user use getresponse() |
|
528 | 528 | # (which we wrap) to get this last response before |
|
529 | 529 | # opening a new socket. |
|
530 | 530 | if getattr(self, '_broken_pipe_resp', None) is not None: |
|
531 | 531 | return |
|
532 | 532 | |
|
533 | 533 | if self.sock is None: |
|
534 | 534 | if self.auto_open: |
|
535 | 535 | self.connect() |
|
536 | 536 | else: |
|
537 | 537 | raise httplib.NotConnected() |
|
538 | 538 | |
|
539 | 539 | # send the data to the server. if we get a broken pipe, then close |
|
540 | 540 | # the socket. we want to reconnect when somebody tries to send again. |
|
541 | 541 | # |
|
542 | 542 | # NOTE: we DO propagate the error, though, because we cannot simply |
|
543 | 543 | # ignore the error... the caller will know if they can retry. |
|
544 | 544 | if self.debuglevel > 0: |
|
545 | 545 | print "send:", repr(str) |
|
546 | 546 | try: |
|
547 | 547 | blocksize = 8192 |
|
548 | 548 | read = getattr(str, 'read', None) |
|
549 | 549 | if read is not None: |
|
550 | 550 | if self.debuglevel > 0: |
|
551 | 551 | print "sendIng a read()able" |
|
552 | 552 | data = read(blocksize) |
|
553 | 553 | while data: |
|
554 | 554 | self.sock.sendall(data) |
|
555 | 555 | data = read(blocksize) |
|
556 | 556 | else: |
|
557 | 557 | self.sock.sendall(str) |
|
558 | 558 | except socket.error, v: |
|
559 | 559 | reraise = True |
|
560 | 560 | if v[0] == errno.EPIPE: # Broken pipe |
|
561 | 561 | if self._HTTPConnection__state == httplib._CS_REQ_SENT: |
|
562 | 562 | self._broken_pipe_resp = None |
|
563 | 563 | self._broken_pipe_resp = self.getresponse() |
|
564 | 564 | reraise = False |
|
565 | 565 | self.close() |
|
566 | 566 | if reraise: |
|
567 | 567 | raise |
|
568 | 568 | |
|
569 | 569 | def wrapgetresponse(cls): |
|
570 | 570 | """Wraps getresponse in cls with a broken-pipe sane version. |
|
571 | 571 | """ |
|
572 | 572 | def safegetresponse(self): |
|
573 | 573 | # In safesend() we might set the _broken_pipe_resp |
|
574 | 574 | # attribute, in which case the socket has already |
|
575 | 575 | # been closed and we just need to give them the response |
|
576 | 576 | # back. Otherwise, we use the normal response path. |
|
577 | 577 | r = getattr(self, '_broken_pipe_resp', None) |
|
578 | 578 | if r is not None: |
|
579 | 579 | return r |
|
580 | 580 | return cls.getresponse(self) |
|
581 | 581 | safegetresponse.__doc__ = cls.getresponse.__doc__ |
|
582 | 582 | return safegetresponse |
|
583 | 583 | |
|
584 | 584 | class HTTPConnection(httplib.HTTPConnection): |
|
585 | 585 | # use the modified response class |
|
586 | 586 | response_class = HTTPResponse |
|
587 | 587 | send = safesend |
|
588 | 588 | getresponse = wrapgetresponse(httplib.HTTPConnection) |
|
589 | 589 | |
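Put together, safesend and the wrapped getresponse cooperate roughly like
this (conn is an HTTPConnection as defined above; the request details are
illustrative):

    conn.request('POST', '/upload', big_body)
    # if the server hangs up mid-send but has already queued a response,
    # safesend stashes it in _broken_pipe_resp instead of raising ...
    r = conn.getresponse()
    # ... and safegetresponse hands back the stashed response rather than
    # reading from the closed socket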
|
590 | 590 | |
|
591 | 591 | ######################################################################### |
|
592 | 592 | ##### TEST FUNCTIONS |
|
593 | 593 | ######################################################################### |
|
594 | 594 | |
|
595 | 595 | def error_handler(url): |
|
596 | 596 | global HANDLE_ERRORS |
|
597 | 597 | orig = HANDLE_ERRORS |
|
598 | 598 | keepalive_handler = HTTPHandler() |
|
599 | 599 | opener = urllib2.build_opener(keepalive_handler) |
|
600 | 600 | urllib2.install_opener(opener) |
|
601 | 601 | pos = {0: 'off', 1: 'on'} |
|
602 | 602 | for i in (0, 1): |
|
603 | 603 | print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i) |
|
604 | 604 | HANDLE_ERRORS = i |
|
605 | 605 | try: |
|
606 | 606 | fo = urllib2.urlopen(url) |
|
607 | 607 | fo.read() |
|
608 | 608 | fo.close() |
|
609 | 609 | try: |
|
610 | 610 | status, reason = fo.status, fo.reason |
|
611 | 611 | except AttributeError: |
|
612 | 612 | status, reason = None, None |
|
613 | 613 | except IOError, e: |
|
614 | 614 | print " EXCEPTION: %s" % e |
|
615 | 615 | raise |
|
616 | 616 | else: |
|
617 | 617 | print " status = %s, reason = %s" % (status, reason) |
|
618 | 618 | HANDLE_ERRORS = orig |
|
619 | 619 | hosts = keepalive_handler.open_connections() |
|
620 | 620 | print "open connections:", hosts |
|
621 | 621 | keepalive_handler.close_all() |
|
622 | 622 | |
|
623 | 623 | def md5(s): |
|
624 | 624 | try: |
|
625 | 625 | from hashlib import md5 as _md5 |
|
626 | 626 | except ImportError: |
|
627 | 627 | from md5 import md5 as _md5 |
|
628 | 628 | global md5 |
|
629 | 629 | md5 = _md5 |
|
630 | 630 | return _md5(s) |
|
631 | 631 | |
|
632 | 632 | def continuity(url): |
|
633 | 633 | format = '%25s: %s' |
|
634 | 634 | |
|
635 | 635 | # first fetch the file with the normal http handler |
|
636 | 636 | opener = urllib2.build_opener() |
|
637 | 637 | urllib2.install_opener(opener) |
|
638 | 638 | fo = urllib2.urlopen(url) |
|
639 | 639 | foo = fo.read() |
|
640 | 640 | fo.close() |
|
641 | 641 | m = md5(foo)
|
642 | 642 | print format % ('normal urllib', m.hexdigest()) |
|
643 | 643 | |
|
644 | 644 | # now install the keepalive handler and try again |
|
645 | 645 | opener = urllib2.build_opener(HTTPHandler()) |
|
646 | 646 | urllib2.install_opener(opener) |
|
647 | 647 | |
|
648 | 648 | fo = urllib2.urlopen(url) |
|
649 | 649 | foo = fo.read() |
|
650 | 650 | fo.close() |
|
651 | 651 | m = md5(foo)
|
652 | 652 | print format % ('keepalive read', m.hexdigest()) |
|
653 | 653 | |
|
654 | 654 | fo = urllib2.urlopen(url) |
|
655 | 655 | foo = '' |
|
656 | 656 | while True: |
|
657 | 657 | f = fo.readline() |
|
658 | 658 | if f: |
|
659 | 659 | foo = foo + f |
|
660 | 660 | else: break |
|
661 | 661 | fo.close() |
|
662 | 662 | m = md5(foo)
|
663 | 663 | print format % ('keepalive readline', m.hexdigest()) |
|
664 | 664 | |
|
665 | 665 | def comp(N, url): |
|
666 | 666 | print ' making %i connections to:\n %s' % (N, url) |
|
667 | 667 | |
|
668 | 668 | sys.stdout.write(' first using the normal urllib handlers') |
|
669 | 669 | # first use normal opener |
|
670 | 670 | opener = urllib2.build_opener() |
|
671 | 671 | urllib2.install_opener(opener) |
|
672 | 672 | t1 = fetch(N, url) |
|
673 | 673 | print ' TIME: %.3f s' % t1 |
|
674 | 674 | |
|
675 | 675 | sys.stdout.write(' now using the keepalive handler ') |
|
676 | 676 | # now install the keepalive handler and try again |
|
677 | 677 | opener = urllib2.build_opener(HTTPHandler()) |
|
678 | 678 | urllib2.install_opener(opener) |
|
679 | 679 | t2 = fetch(N, url) |
|
680 | 680 | print ' TIME: %.3f s' % t2 |
|
681 | 681 | print ' improvement factor: %.2f' % (t1 / t2) |
|
682 | 682 | |
|
683 | 683 | def fetch(N, url, delay=0): |
|
684 | 684 | import time |
|
685 | 685 | lens = [] |
|
686 | 686 | starttime = time.time() |
|
687 | 687 | for i in range(N): |
|
688 | 688 | if delay and i > 0: |
|
689 | 689 | time.sleep(delay) |
|
690 | 690 | fo = urllib2.urlopen(url) |
|
691 | 691 | foo = fo.read() |
|
692 | 692 | fo.close() |
|
693 | 693 | lens.append(len(foo)) |
|
694 | 694 | diff = time.time() - starttime |
|
695 | 695 | |
|
696 | 696 | j = 0 |
|
697 | 697 | for i in lens[1:]: |
|
698 | 698 | j = j + 1 |
|
699 | 699 | if not i == lens[0]: |
|
700 | 700 | print "WARNING: inconsistent length on read %i: %i" % (j, i) |
|
701 | 701 | |
|
702 | 702 | return diff |
|
703 | 703 | |
|
704 | 704 | def test_timeout(url): |
|
705 | 705 | global DEBUG |
|
706 | 706 | dbbackup = DEBUG |
|
707 | 707 | class FakeLogger(object): |
|
708 | 708 | def debug(self, msg, *args): |
|
709 | 709 | print msg % args |
|
710 | 710 | info = warning = error = debug |
|
711 | 711 | DEBUG = FakeLogger() |
|
712 | 712 | print " fetching the file to establish a connection" |
|
713 | 713 | fo = urllib2.urlopen(url) |
|
714 | 714 | data1 = fo.read() |
|
715 | 715 | fo.close() |
|
716 | 716 | |
|
717 | 717 | i = 20 |
|
718 | 718 | print " waiting %i seconds for the server to close the connection" % i |
|
719 | 719 | while i > 0: |
|
720 | 720 | sys.stdout.write('\r %2i' % i) |
|
721 | 721 | sys.stdout.flush() |
|
722 | 722 | time.sleep(1) |
|
723 | 723 | i -= 1 |
|
724 | 724 | sys.stderr.write('\r') |
|
725 | 725 | |
|
726 | 726 | print " fetching the file a second time" |
|
727 | 727 | fo = urllib2.urlopen(url) |
|
728 | 728 | data2 = fo.read() |
|
729 | 729 | fo.close() |
|
730 | 730 | |
|
731 | 731 | if data1 == data2: |
|
732 | 732 | print ' data are identical' |
|
733 | 733 | else: |
|
734 | 734 | print ' ERROR: DATA DIFFER' |
|
735 | 735 | |
|
736 | 736 | DEBUG = dbbackup |
|
737 | 737 | |
|
738 | 738 | |
|
739 | 739 | def test(url, N=10): |
|
740 | 740 | print "checking error hander (do this on a non-200)" |
|
741 | 741 | try: error_handler(url) |
|
742 | 742 | except IOError: |
|
743 | 743 | print "exiting - exception will prevent further tests" |
|
744 | 744 | sys.exit() |
|
745 | 745 | |
|
746 | 746 | print "performing continuity test (making sure stuff isn't corrupted)" |
|
747 | 747 | continuity(url) |
|
748 | 748 | |
|
749 | 749 | print "performing speed comparison" |
|
750 | 750 | comp(N, url) |
|
751 | 751 | |
|
752 | 752 | print "performing dropped-connection check" |
|
753 | 753 | test_timeout(url) |
|
754 | 754 | |
|
755 | 755 | if __name__ == '__main__': |
|
756 | 756 | import time |
|
757 | 757 | import sys |
|
758 | 758 | try: |
|
759 | 759 | N = int(sys.argv[1]) |
|
760 | 760 | url = sys.argv[2] |
|
761 | 761 | except: |
|
762 | 762 | print "%s <integer> <url>" % sys.argv[0] |
|
763 | 763 | else: |
|
764 | 764 | test(url, N) |
@@ -1,1890 +1,1890 b'' | |||
|
1 | 1 | # patch.py - patch file parsing routines |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Brendan Cully <brendan@kublai.com> |
|
4 | 4 | # Copyright 2007 Chris Mason <chris.mason@oracle.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | import cStringIO, email.Parser, os, errno, re |
|
10 | 10 | import tempfile, zlib, shutil |
|
11 | 11 | |
|
12 | 12 | from i18n import _ |
|
13 | 13 | from node import hex, nullid, short |
|
14 | 14 | import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error |
|
15 | 15 | import context |
|
16 | 16 | |
|
17 | 17 | gitre = re.compile('diff --git a/(.*) b/(.*)') |
|
18 | 18 | |
|
19 | 19 | class PatchError(Exception): |
|
20 | 20 | pass |
|
21 | 21 | |
|
22 | 22 | |
|
23 | 23 | # public functions |
|
24 | 24 | |
|
25 | 25 | def split(stream): |
|
26 | 26 | '''return an iterator of individual patches from a stream''' |
|
27 | 27 | def isheader(line, inheader): |
|
28 | 28 | if inheader and line[0] in (' ', '\t'): |
|
29 | 29 | # continuation |
|
30 | 30 | return True |
|
31 | 31 | if line[0] in (' ', '-', '+'): |
|
32 | 32 | # diff line - don't check for header pattern in there |
|
33 | 33 | return False |
|
34 | 34 | l = line.split(': ', 1) |
|
35 | 35 | return len(l) == 2 and ' ' not in l[0] |
|
36 | 36 | |
|
37 | 37 | def chunk(lines): |
|
38 | 38 | return cStringIO.StringIO(''.join(lines)) |
|
39 | 39 | |
|
40 | 40 | def hgsplit(stream, cur): |
|
41 | 41 | inheader = True |
|
42 | 42 | |
|
43 | 43 | for line in stream: |
|
44 | 44 | if not line.strip(): |
|
45 | 45 | inheader = False |
|
46 | 46 | if not inheader and line.startswith('# HG changeset patch'): |
|
47 | 47 | yield chunk(cur) |
|
48 | 48 | cur = [] |
|
49 | 49 | inheader = True |
|
50 | 50 | |
|
51 | 51 | cur.append(line) |
|
52 | 52 | |
|
53 | 53 | if cur: |
|
54 | 54 | yield chunk(cur) |
|
55 | 55 | |
|
56 | 56 | def mboxsplit(stream, cur): |
|
57 | 57 | for line in stream: |
|
58 | 58 | if line.startswith('From '): |
|
59 | 59 | for c in split(chunk(cur[1:])): |
|
60 | 60 | yield c |
|
61 | 61 | cur = [] |
|
62 | 62 | |
|
63 | 63 | cur.append(line) |
|
64 | 64 | |
|
65 | 65 | if cur: |
|
66 | 66 | for c in split(chunk(cur[1:])): |
|
67 | 67 | yield c |
|
68 | 68 | |
|
69 | 69 | def mimesplit(stream, cur): |
|
70 | 70 | def msgfp(m): |
|
71 | 71 | fp = cStringIO.StringIO() |
|
72 | 72 | g = email.Generator.Generator(fp, mangle_from_=False) |
|
73 | 73 | g.flatten(m) |
|
74 | 74 | fp.seek(0) |
|
75 | 75 | return fp |
|
76 | 76 | |
|
77 | 77 | for line in stream: |
|
78 | 78 | cur.append(line) |
|
79 | 79 | c = chunk(cur) |
|
80 | 80 | |
|
81 | 81 | m = email.Parser.Parser().parse(c) |
|
82 | 82 | if not m.is_multipart(): |
|
83 | 83 | yield msgfp(m) |
|
84 | 84 | else: |
|
85 | 85 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
86 | 86 | for part in m.walk(): |
|
87 | 87 | ct = part.get_content_type() |
|
88 | 88 | if ct not in ok_types: |
|
89 | 89 | continue |
|
90 | 90 | yield msgfp(part) |
|
91 | 91 | |
|
92 | 92 | def headersplit(stream, cur): |
|
93 | 93 | inheader = False |
|
94 | 94 | |
|
95 | 95 | for line in stream: |
|
96 | 96 | if not inheader and isheader(line, inheader): |
|
97 | 97 | yield chunk(cur) |
|
98 | 98 | cur = [] |
|
99 | 99 | inheader = True |
|
100 | 100 | if inheader and not isheader(line, inheader): |
|
101 | 101 | inheader = False |
|
102 | 102 | |
|
103 | 103 | cur.append(line) |
|
104 | 104 | |
|
105 | 105 | if cur: |
|
106 | 106 | yield chunk(cur) |
|
107 | 107 | |
|
108 | 108 | def remainder(cur): |
|
109 | 109 | yield chunk(cur) |
|
110 | 110 | |
|
111 | 111 | class fiter(object): |
|
112 | 112 | def __init__(self, fp): |
|
113 | 113 | self.fp = fp |
|
114 | 114 | |
|
115 | 115 | def __iter__(self): |
|
116 | 116 | return self |
|
117 | 117 | |
|
118 | 118 | def next(self): |
|
119 | 119 | l = self.fp.readline() |
|
120 | 120 | if not l: |
|
121 | 121 | raise StopIteration |
|
122 | 122 | return l |
|
123 | 123 | |
|
124 | 124 | inheader = False |
|
125 | 125 | cur = [] |
|
126 | 126 | |
|
127 | 127 | mimeheaders = ['content-type'] |
|
128 | 128 | |
|
129 | 129 | if not util.safehasattr(stream, 'next'): |
|
130 | 130 | # http responses, for example, have readline but not next |
|
131 | 131 | stream = fiter(stream) |
|
132 | 132 | |
|
133 | 133 | for line in stream: |
|
134 | 134 | cur.append(line) |
|
135 | 135 | if line.startswith('# HG changeset patch'): |
|
136 | 136 | return hgsplit(stream, cur) |
|
137 | 137 | elif line.startswith('From '): |
|
138 | 138 | return mboxsplit(stream, cur) |
|
139 | 139 | elif isheader(line, inheader): |
|
140 | 140 | inheader = True |
|
141 | 141 | if line.split(':', 1)[0].lower() in mimeheaders: |
|
142 | 142 | # let email parser handle this |
|
143 | 143 | return mimesplit(stream, cur) |
|
144 | 144 | elif line.startswith('--- ') and inheader: |
|
145 | 145 | # No evil headers seen by diff start, split by hand |
|
146 | 146 | return headersplit(stream, cur) |
|
147 | 147 | # Not enough info, keep reading |
|
148 | 148 | |
|
149 | 149 | # if we are here, we have a very plain patch |
|
150 | 150 | return remainder(cur) |
|
151 | 151 | |
|
152 | 152 | def extract(ui, fileobj): |
|
153 | 153 | '''extract patch from data read from fileobj. |
|
154 | 154 | |
|
155 | 155 | patch can be a normal patch or contained in an email message. |
|
156 | 156 | |
|
157 | 157 | return tuple (filename, message, user, date, branch, node, p1, p2). |
|
158 | 158 | Any item in the returned tuple can be None. If filename is None, |
|
159 | 159 | fileobj did not contain a patch. Caller must unlink filename when done.''' |
|
160 | 160 | |
|
161 | 161 | # attempt to detect the start of a patch |
|
162 | 162 | # (this heuristic is borrowed from quilt) |
|
163 | 163 | diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |' |
|
164 | 164 | r'retrieving revision [0-9]+(\.[0-9]+)*$|' |
|
165 | 165 | r'---[ \t].*?^\+\+\+[ \t]|' |
|
166 | 166 | r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL) |
|
167 | 167 | |
|
168 | 168 | fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') |
|
169 | 169 | tmpfp = os.fdopen(fd, 'w') |
|
170 | 170 | try: |
|
171 | 171 | msg = email.Parser.Parser().parse(fileobj) |
|
172 | 172 | |
|
173 | 173 | subject = msg['Subject'] |
|
174 | 174 | user = msg['From'] |
|
175 | 175 | if not subject and not user: |
|
176 | 176 | # Not an email, restore parsed headers if any |
|
177 | 177 | subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n' |
|
178 | 178 | |
|
179 | 179 | gitsendmail = 'git-send-email' in msg.get('X-Mailer', '') |
|
180 | 180 | # should try to parse msg['Date'] |
|
181 | 181 | date = None |
|
182 | 182 | nodeid = None |
|
183 | 183 | branch = None |
|
184 | 184 | parents = [] |
|
185 | 185 | |
|
186 | 186 | if subject: |
|
187 | 187 | if subject.startswith('[PATCH'): |
|
188 | 188 | pend = subject.find(']') |
|
189 | 189 | if pend >= 0: |
|
190 | 190 | subject = subject[pend + 1:].lstrip() |
|
191 | 191 | subject = re.sub(r'\n[ \t]+', ' ', subject) |
|
192 | 192 | ui.debug('Subject: %s\n' % subject) |
|
193 | 193 | if user: |
|
194 | 194 | ui.debug('From: %s\n' % user) |
|
195 | 195 | diffs_seen = 0 |
|
196 | 196 | ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') |
|
197 | 197 | message = '' |
|
198 | 198 | for part in msg.walk(): |
|
199 | 199 | content_type = part.get_content_type() |
|
200 | 200 | ui.debug('Content-Type: %s\n' % content_type) |
|
201 | 201 | if content_type not in ok_types: |
|
202 | 202 | continue |
|
203 | 203 | payload = part.get_payload(decode=True) |
|
204 | 204 | m = diffre.search(payload) |
|
205 | 205 | if m: |
|
206 | 206 | hgpatch = False |
|
207 | 207 | hgpatchheader = False |
|
208 | 208 | ignoretext = False |
|
209 | 209 | |
|
210 | 210 | ui.debug('found patch at byte %d\n' % m.start(0)) |
|
211 | 211 | diffs_seen += 1 |
|
212 | 212 | cfp = cStringIO.StringIO() |
|
213 | 213 | for line in payload[:m.start(0)].splitlines(): |
|
214 | 214 | if line.startswith('# HG changeset patch') and not hgpatch: |
|
215 | 215 | ui.debug('patch generated by hg export\n') |
|
216 | 216 | hgpatch = True |
|
217 | 217 | hgpatchheader = True |
|
218 | 218 | # drop earlier commit message content |
|
219 | 219 | cfp.seek(0) |
|
220 | 220 | cfp.truncate() |
|
221 | 221 | subject = None |
|
222 | 222 | elif hgpatchheader: |
|
223 | 223 | if line.startswith('# User '): |
|
224 | 224 | user = line[7:] |
|
225 | 225 | ui.debug('From: %s\n' % user) |
|
226 | 226 | elif line.startswith("# Date "): |
|
227 | 227 | date = line[7:] |
|
228 | 228 | elif line.startswith("# Branch "): |
|
229 | 229 | branch = line[9:] |
|
230 | 230 | elif line.startswith("# Node ID "): |
|
231 | 231 | nodeid = line[10:] |
|
232 | 232 | elif line.startswith("# Parent "): |
|
233 | 233 | parents.append(line[9:].lstrip()) |
|
234 | 234 | elif not line.startswith("# "): |
|
235 | 235 | hgpatchheader = False |
|
236 | 236 | elif line == '---' and gitsendmail: |
|
237 | 237 | ignoretext = True |
|
238 | 238 | if not hgpatchheader and not ignoretext: |
|
239 | 239 | cfp.write(line) |
|
240 | 240 | cfp.write('\n') |
|
241 | 241 | message = cfp.getvalue() |
|
242 | 242 | if tmpfp: |
|
243 | 243 | tmpfp.write(payload) |
|
244 | 244 | if not payload.endswith('\n'): |
|
245 | 245 | tmpfp.write('\n') |
|
246 | 246 | elif not diffs_seen and message and content_type == 'text/plain': |
|
247 | 247 | message += '\n' + payload |
|
248 | 248 | except: |
|
249 | 249 | tmpfp.close() |
|
250 | 250 | os.unlink(tmpname) |
|
251 | 251 | raise |
|
252 | 252 | |
|
253 | 253 | if subject and not message.startswith(subject): |
|
254 | 254 | message = '%s\n%s' % (subject, message) |
|
255 | 255 | tmpfp.close() |
|
256 | 256 | if not diffs_seen: |
|
257 | 257 | os.unlink(tmpname) |
|
258 | 258 | return None, message, user, date, branch, None, None, None |
|
259 | 259 | p1 = parents and parents.pop(0) or None |
|
260 | 260 | p2 = parents and parents.pop(0) or None |
|
261 | 261 | return tmpname, message, user, date, branch, nodeid, p1, p2 |
|
262 | 262 | |
|
263 | 263 | class patchmeta(object): |
|
264 | 264 | """Patched file metadata |
|
265 | 265 | |
|
266 | 266 | 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY |
|
267 | 267 | or COPY. 'path' is patched file path. 'oldpath' is set to the |
|
268 | 268 | origin file when 'op' is either COPY or RENAME, None otherwise. If |
|
269 | 269 | file mode is changed, 'mode' is a tuple (islink, isexec) where |
|
270 | 270 | 'islink' is True if the file is a symlink and 'isexec' is True if |
|
271 | 271 | the file is executable. Otherwise, 'mode' is None. |
|
272 | 272 | """ |
|
273 | 273 | def __init__(self, path): |
|
274 | 274 | self.path = path |
|
275 | 275 | self.oldpath = None |
|
276 | 276 | self.mode = None |
|
277 | 277 | self.op = 'MODIFY' |
|
278 | 278 | self.binary = False |
|
279 | 279 | |
|
280 | 280 | def setmode(self, mode): |
|
281 | 281 | islink = mode & 020000 |
|
282 | 282 | isexec = mode & 0100 |
|
283 | 283 | self.mode = (islink, isexec) |
|
284 | 284 | |
|
285 | 285 | def copy(self): |
|
286 | 286 | other = patchmeta(self.path) |
|
287 | 287 | other.oldpath = self.oldpath |
|
288 | 288 | other.mode = self.mode |
|
289 | 289 | other.op = self.op |
|
290 | 290 | other.binary = self.binary |
|
291 | 291 | return other |
|
292 | 292 | |
|
293 | 293 | def _ispatchinga(self, afile): |
|
294 | 294 | if afile == '/dev/null': |
|
295 | 295 | return self.op == 'ADD' |
|
296 | 296 | return afile == 'a/' + (self.oldpath or self.path) |
|
297 | 297 | |
|
298 | 298 | def _ispatchingb(self, bfile): |
|
299 | 299 | if bfile == '/dev/null': |
|
300 | 300 | return self.op == 'DELETE' |
|
301 | 301 | return bfile == 'b/' + self.path |
|
302 | 302 | |
|
303 | 303 | def ispatching(self, afile, bfile): |
|
304 | 304 | return self._ispatchinga(afile) and self._ispatchingb(bfile) |
|
305 | 305 | |
|
306 | 306 | def __repr__(self): |
|
307 | 307 | return "<patchmeta %s %r>" % (self.op, self.path) |
|
308 | 308 | |
|
309 | 309 | def readgitpatch(lr): |
|
310 | 310 | """extract git-style metadata about patches from <patchname>""" |
|
311 | 311 | |
|
312 | 312 | # Filter patch for git information |
|
313 | 313 | gp = None |
|
314 | 314 | gitpatches = [] |
|
315 | 315 | for line in lr: |
|
316 | 316 | line = line.rstrip(' \r\n') |
|
317 | 317 | if line.startswith('diff --git'): |
|
318 | 318 | m = gitre.match(line) |
|
319 | 319 | if m: |
|
320 | 320 | if gp: |
|
321 | 321 | gitpatches.append(gp) |
|
322 | 322 | dst = m.group(2) |
|
323 | 323 | gp = patchmeta(dst) |
|
324 | 324 | elif gp: |
|
325 | 325 | if line.startswith('--- '): |
|
326 | 326 | gitpatches.append(gp) |
|
327 | 327 | gp = None |
|
328 | 328 | continue |
|
329 | 329 | if line.startswith('rename from '): |
|
330 | 330 | gp.op = 'RENAME' |
|
331 | 331 | gp.oldpath = line[12:] |
|
332 | 332 | elif line.startswith('rename to '): |
|
333 | 333 | gp.path = line[10:] |
|
334 | 334 | elif line.startswith('copy from '): |
|
335 | 335 | gp.op = 'COPY' |
|
336 | 336 | gp.oldpath = line[10:] |
|
337 | 337 | elif line.startswith('copy to '): |
|
338 | 338 | gp.path = line[8:] |
|
339 | 339 | elif line.startswith('deleted file'): |
|
340 | 340 | gp.op = 'DELETE' |
|
341 | 341 | elif line.startswith('new file mode '): |
|
342 | 342 | gp.op = 'ADD' |
|
343 | 343 | gp.setmode(int(line[-6:], 8)) |
|
344 | 344 | elif line.startswith('new mode '): |
|
345 | 345 | gp.setmode(int(line[-6:], 8)) |
|
346 | 346 | elif line.startswith('GIT binary patch'): |
|
347 | 347 | gp.binary = True |
|
348 | 348 | if gp: |
|
349 | 349 | gitpatches.append(gp) |
|
350 | 350 | |
|
351 | 351 | return gitpatches |
|
352 | 352 | |
|
353 | 353 | class linereader(object): |
|
354 | 354 | # simple class to allow pushing lines back into the input stream |
|
355 | 355 | def __init__(self, fp): |
|
356 | 356 | self.fp = fp |
|
357 | 357 | self.buf = [] |
|
358 | 358 | |
|
359 | 359 | def push(self, line): |
|
360 | 360 | if line is not None: |
|
361 | 361 | self.buf.append(line) |
|
362 | 362 | |
|
363 | 363 | def readline(self): |
|
364 | 364 | if self.buf: |
|
365 | 365 | l = self.buf[0] |
|
366 | 366 | del self.buf[0] |
|
367 | 367 | return l |
|
368 | 368 | return self.fp.readline() |
|
369 | 369 | |
|
370 | 370 | def __iter__(self): |
|
371 | 371 | while True: |
|
372 | 372 | l = self.readline() |
|
373 | 373 | if not l: |
|
374 | 374 | break |
|
375 | 375 | yield l |
|
376 | 376 | |
|
377 | 377 | class abstractbackend(object): |
|
378 | 378 | def __init__(self, ui): |
|
379 | 379 | self.ui = ui |
|
380 | 380 | |
|
381 | 381 | def getfile(self, fname): |
|
382 | 382 | """Return target file data and flags as a (data, (islink, |
|
383 | 383 | isexec)) tuple. |
|
384 | 384 | """ |
|
385 | 385 | raise NotImplementedError |
|
386 | 386 | |
|
387 | 387 | def setfile(self, fname, data, mode, copysource): |
|
388 | 388 | """Write data to target file fname and set its mode. mode is a |
|
389 | 389 | (islink, isexec) tuple. If data is None, the file content should |
|
390 | 390 | be left unchanged. If the file is modified after being copied, |
|
391 | 391 | copysource is set to the original file name. |
|
392 | 392 | """ |
|
393 | 393 | raise NotImplementedError |
|
394 | 394 | |
|
395 | 395 | def unlink(self, fname): |
|
396 | 396 | """Unlink target file.""" |
|
397 | 397 | raise NotImplementedError |
|
398 | 398 | |
|
399 | 399 | def writerej(self, fname, failed, total, lines): |
|
400 | 400 | """Write rejected lines for fname. total is the number of hunks |
|
401 | 401 | which failed to apply and total the total number of hunks for this |
|
402 | 402 | files. |
|
403 | 403 | """ |
|
404 | 404 | pass |
|
405 | 405 | |
|
406 | 406 | def exists(self, fname): |
|
407 | 407 | raise NotImplementedError |
|
408 | 408 | |
|
409 | 409 | class fsbackend(abstractbackend): |
|
410 | 410 | def __init__(self, ui, basedir): |
|
411 | 411 | super(fsbackend, self).__init__(ui) |
|
412 | 412 | self.opener = scmutil.opener(basedir) |
|
413 | 413 | |
|
414 | 414 | def _join(self, f): |
|
415 | 415 | return os.path.join(self.opener.base, f) |
|
416 | 416 | |
|
417 | 417 | def getfile(self, fname): |
|
418 | 418 | path = self._join(fname) |
|
419 | 419 | if os.path.islink(path): |
|
420 | 420 | return (os.readlink(path), (True, False)) |
|
421 | 421 | isexec = False |
|
422 | 422 | try: |
|
423 | 423 | isexec = os.lstat(path).st_mode & 0100 != 0 |
|
424 | 424 | except OSError, e: |
|
425 | 425 | if e.errno != errno.ENOENT: |
|
426 | 426 | raise |
|
427 | 427 | return (self.opener.read(fname), (False, isexec)) |
|
428 | 428 | |
|
429 | 429 | def setfile(self, fname, data, mode, copysource): |
|
430 | 430 | islink, isexec = mode |
|
431 | 431 | if data is None: |
|
432 | 432 | util.setflags(self._join(fname), islink, isexec) |
|
433 | 433 | return |
|
434 | 434 | if islink: |
|
435 | 435 | self.opener.symlink(data, fname) |
|
436 | 436 | else: |
|
437 | 437 | self.opener.write(fname, data) |
|
438 | 438 | if isexec: |
|
439 | 439 | util.setflags(self._join(fname), False, True) |
|
440 | 440 | |
|
441 | 441 | def unlink(self, fname): |
|
442 | 442 | try: |
|
443 | 443 | util.unlinkpath(self._join(fname)) |
|
444 | 444 | except OSError, inst: |
|
445 | 445 | if inst.errno != errno.ENOENT: |
|
446 | 446 | raise |
|
447 | 447 | |
|
448 | 448 | def writerej(self, fname, failed, total, lines): |
|
449 | 449 | fname = fname + ".rej" |
|
450 | 450 | self.ui.warn( |
|
451 | 451 | _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % |
|
452 | 452 | (failed, total, fname)) |
|
453 | 453 | fp = self.opener(fname, 'w') |
|
454 | 454 | fp.writelines(lines) |
|
455 | 455 | fp.close() |
|
456 | 456 | |
|
457 | 457 | def exists(self, fname): |
|
458 | 458 | return os.path.lexists(self._join(fname)) |
|
459 | 459 | |
|
460 | 460 | class workingbackend(fsbackend): |
|
461 | 461 | def __init__(self, ui, repo, similarity): |
|
462 | 462 | super(workingbackend, self).__init__(ui, repo.root) |
|
463 | 463 | self.repo = repo |
|
464 | 464 | self.similarity = similarity |
|
465 | 465 | self.removed = set() |
|
466 | 466 | self.changed = set() |
|
467 | 467 | self.copied = [] |
|
468 | 468 | |
|
469 | 469 | def _checkknown(self, fname): |
|
470 | 470 | if self.repo.dirstate[fname] == '?' and self.exists(fname): |
|
471 | 471 | raise PatchError(_('cannot patch %s: file is not tracked') % fname) |
|
472 | 472 | |
|
473 | 473 | def setfile(self, fname, data, mode, copysource): |
|
474 | 474 | self._checkknown(fname) |
|
475 | 475 | super(workingbackend, self).setfile(fname, data, mode, copysource) |
|
476 | 476 | if copysource is not None: |
|
477 | 477 | self.copied.append((copysource, fname)) |
|
478 | 478 | self.changed.add(fname) |
|
479 | 479 | |
|
480 | 480 | def unlink(self, fname): |
|
481 | 481 | self._checkknown(fname) |
|
482 | 482 | super(workingbackend, self).unlink(fname) |
|
483 | 483 | self.removed.add(fname) |
|
484 | 484 | self.changed.add(fname) |
|
485 | 485 | |
|
486 | 486 | def close(self): |
|
487 | 487 | wctx = self.repo[None] |
|
488 | 488 | addremoved = set(self.changed) |
|
489 | 489 | for src, dst in self.copied: |
|
490 | 490 | scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst) |
|
491 | 491 | if self.removed: |
|
492 | 492 | wctx.forget(sorted(self.removed)) |
|
493 | 493 | for f in self.removed: |
|
494 | 494 | if f not in self.repo.dirstate: |
|
495 | 495 | # File was deleted and no longer belongs to the |
|
496 | 496 | # dirstate, it was probably marked added then |
|
497 | 497 | # deleted, and should not be considered by |
|
498 | 498 | # addremove(). |
|
499 | 499 | addremoved.discard(f) |
|
500 | 500 | if addremoved: |
|
501 | 501 | cwd = self.repo.getcwd() |
|
502 | 502 | if cwd: |
|
503 | 503 | addremoved = [util.pathto(self.repo.root, cwd, f) |
|
504 | 504 | for f in addremoved] |
|
505 | 505 | scmutil.addremove(self.repo, addremoved, similarity=self.similarity) |
|
506 | 506 | return sorted(self.changed) |
|
507 | 507 | |
|
508 | 508 | class filestore(object): |
|
509 | 509 | def __init__(self, maxsize=None): |
|
510 | 510 | self.opener = None |
|
511 | 511 | self.files = {} |
|
512 | 512 | self.created = 0 |
|
513 | 513 | self.maxsize = maxsize |
|
514 | 514 | if self.maxsize is None: |
|
515 | 515 | self.maxsize = 4*(2**20) |
|
516 | 516 | self.size = 0 |
|
517 | 517 | self.data = {} |
|
518 | 518 | |
|
519 | 519 | def setfile(self, fname, data, mode, copied=None): |
|
520 | 520 | if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize: |
|
521 | 521 | self.data[fname] = (data, mode, copied) |
|
522 | 522 | self.size += len(data) |
|
523 | 523 | else: |
|
524 | 524 | if self.opener is None: |
|
525 | 525 | root = tempfile.mkdtemp(prefix='hg-patch-') |
|
526 | 526 | self.opener = scmutil.opener(root) |
|
527 | 527 | # Avoid filename issues with these simple names |
|
528 | 528 | fn = str(self.created) |
|
529 | 529 | self.opener.write(fn, data) |
|
530 | 530 | self.created += 1 |
|
531 | 531 | self.files[fname] = (fn, mode, copied) |
|
532 | 532 | |
|
533 | 533 | def getfile(self, fname): |
|
534 | 534 | if fname in self.data: |
|
535 | 535 | return self.data[fname] |
|
536 | 536 | if not self.opener or fname not in self.files: |
|
537 | 537 | raise IOError() |
|
538 | 538 | fn, mode, copied = self.files[fname] |
|
539 | 539 | return self.opener.read(fn), mode, copied |
|
540 | 540 | |
|
541 | 541 | def close(self): |
|
542 | 542 | if self.opener: |
|
543 | 543 | shutil.rmtree(self.opener.base) |
|
544 | 544 | |
|
545 | 545 | class repobackend(abstractbackend): |
|
546 | 546 | def __init__(self, ui, repo, ctx, store): |
|
547 | 547 | super(repobackend, self).__init__(ui) |
|
548 | 548 | self.repo = repo |
|
549 | 549 | self.ctx = ctx |
|
550 | 550 | self.store = store |
|
551 | 551 | self.changed = set() |
|
552 | 552 | self.removed = set() |
|
553 | 553 | self.copied = {} |
|
554 | 554 | |
|
555 | 555 | def _checkknown(self, fname): |
|
556 | 556 | if fname not in self.ctx: |
|
557 | 557 | raise PatchError(_('cannot patch %s: file is not tracked') % fname) |
|
558 | 558 | |
|
559 | 559 | def getfile(self, fname): |
|
560 | 560 | try: |
|
561 | 561 | fctx = self.ctx[fname] |
|
562 | 562 | except error.LookupError: |
|
563 | 563 | raise IOError() |
|
564 | 564 | flags = fctx.flags() |
|
565 | 565 | return fctx.data(), ('l' in flags, 'x' in flags) |
|
566 | 566 | |
|
567 | 567 | def setfile(self, fname, data, mode, copysource): |
|
568 | 568 | if copysource: |
|
569 | 569 | self._checkknown(copysource) |
|
570 | 570 | if data is None: |
|
571 | 571 | data = self.ctx[fname].data() |
|
572 | 572 | self.store.setfile(fname, data, mode, copysource) |
|
573 | 573 | self.changed.add(fname) |
|
574 | 574 | if copysource: |
|
575 | 575 | self.copied[fname] = copysource |
|
576 | 576 | |
|
577 | 577 | def unlink(self, fname): |
|
578 | 578 | self._checkknown(fname) |
|
579 | 579 | self.removed.add(fname) |
|
580 | 580 | |
|
581 | 581 | def exists(self, fname): |
|
582 | 582 | return fname in self.ctx |
|
583 | 583 | |
|
584 | 584 | def close(self): |
|
585 | 585 | return self.changed | self.removed |
|
586 | 586 | |
|
587 | 587 | # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 |
|
588 | 588 | unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') |
|
589 | 589 | contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') |
|
590 | 590 | eolmodes = ['strict', 'crlf', 'lf', 'auto'] |
|
591 | 591 | |
|
592 | 592 | class patchfile(object): |
|
593 | 593 | def __init__(self, ui, gp, backend, store, eolmode='strict'): |
|
594 | 594 | self.fname = gp.path |
|
595 | 595 | self.eolmode = eolmode |
|
596 | 596 | self.eol = None |
|
597 | 597 | self.backend = backend |
|
598 | 598 | self.ui = ui |
|
599 | 599 | self.lines = [] |
|
600 | 600 | self.exists = False |
|
601 | 601 | self.missing = True |
|
602 | 602 | self.mode = gp.mode |
|
603 | 603 | self.copysource = gp.oldpath |
|
604 | 604 | self.create = gp.op in ('ADD', 'COPY', 'RENAME') |
|
605 | 605 | self.remove = gp.op == 'DELETE' |
|
606 | 606 | try: |
|
607 | 607 | if self.copysource is None: |
|
608 | 608 | data, mode = backend.getfile(self.fname) |
|
609 | 609 | self.exists = True |
|
610 | 610 | else: |
|
611 | 611 | data, mode = store.getfile(self.copysource)[:2] |
|
612 | 612 | self.exists = backend.exists(self.fname) |
|
613 | 613 | self.missing = False |
|
614 | 614 | if data: |
|
615 | 615 | self.lines = mdiff.splitnewlines(data) |
|
616 | 616 | if self.mode is None: |
|
617 | 617 | self.mode = mode |
|
618 | 618 | if self.lines: |
|
619 | 619 | # Normalize line endings |
|
620 | 620 | if self.lines[0].endswith('\r\n'): |
|
621 | 621 | self.eol = '\r\n' |
|
622 | 622 | elif self.lines[0].endswith('\n'): |
|
623 | 623 | self.eol = '\n' |
|
624 | 624 | if eolmode != 'strict': |
|
625 | 625 | nlines = [] |
|
626 | 626 | for l in self.lines: |
|
627 | 627 | if l.endswith('\r\n'): |
|
628 | 628 | l = l[:-2] + '\n' |
|
629 | 629 | nlines.append(l) |
|
630 | 630 | self.lines = nlines |
|
631 | 631 | except IOError: |
|
632 | 632 | if self.create: |
|
633 | 633 | self.missing = False |
|
634 | 634 | if self.mode is None: |
|
635 | 635 | self.mode = (False, False) |
|
636 | 636 | if self.missing: |
|
637 | 637 | self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) |
|
638 | 638 | |
|
639 | 639 | self.hash = {} |
|
640 | 640 | self.dirty = 0 |
|
641 | 641 | self.offset = 0 |
|
642 | 642 | self.skew = 0 |
|
643 | 643 | self.rej = [] |
|
644 | 644 | self.fileprinted = False |
|
645 | 645 | self.printfile(False) |
|
646 | 646 | self.hunks = 0 |
|
647 | 647 | |
|
648 | 648 | def writelines(self, fname, lines, mode): |
|
649 | 649 | if self.eolmode == 'auto': |
|
650 | 650 | eol = self.eol |
|
651 | 651 | elif self.eolmode == 'crlf': |
|
652 | 652 | eol = '\r\n' |
|
653 | 653 | else: |
|
654 | 654 | eol = '\n' |
|
655 | 655 | |
|
656 | 656 | if self.eolmode != 'strict' and eol and eol != '\n': |
|
657 | 657 | rawlines = [] |
|
658 | 658 | for l in lines: |
|
659 | 659 | if l and l[-1] == '\n': |
|
660 | 660 | l = l[:-1] + eol |
|
661 | 661 | rawlines.append(l) |
|
662 | 662 | lines = rawlines |
|
663 | 663 | |
|
664 | 664 | self.backend.setfile(fname, ''.join(lines), mode, self.copysource) |
|
665 | 665 | |
|
666 | 666 | def printfile(self, warn): |
|
667 | 667 | if self.fileprinted: |
|
668 | 668 | return |
|
669 | 669 | if warn or self.ui.verbose: |
|
670 | 670 | self.fileprinted = True |
|
671 | 671 | s = _("patching file %s\n") % self.fname |
|
672 | 672 | if warn: |
|
673 | 673 | self.ui.warn(s) |
|
674 | 674 | else: |
|
675 | 675 | self.ui.note(s) |
|
676 | 676 | |
|
677 | 677 | |
|
678 | 678 | def findlines(self, l, linenum): |
|
679 | 679 | # looks through the hash and finds candidate lines. The |
|
680 | 680 | # result is a list of line numbers sorted based on distance |
|
681 | 681 | # from linenum |
|
682 | 682 | |
|
683 | 683 | cand = self.hash.get(l, []) |
|
684 | 684 | if len(cand) > 1: |
|
685 | 685 | # resort our list of potentials forward then back. |
|
686 | 686 | cand.sort(key=lambda x: abs(x - linenum)) |
|
687 | 687 | return cand |
|
688 | 688 | |
|
689 | 689 | def write_rej(self): |
|
690 | 690 | # our rejects are a little different from patch(1). This always |
|
691 | 691 | # creates rejects in the same form as the original patch. A file |
|
692 | 692 | # header is inserted so that you can run the reject through patch again |
|
693 | 693 | # without having to type the filename. |
|
694 | 694 | if not self.rej: |
|
695 | 695 | return |
|
696 | 696 | base = os.path.basename(self.fname) |
|
697 | 697 | lines = ["--- %s\n+++ %s\n" % (base, base)] |
|
698 | 698 | for x in self.rej: |
|
699 | 699 | for l in x.hunk: |
|
700 | 700 | lines.append(l) |
|
701 | 701 | if l[-1] != '\n': |
|
702 | 702 | lines.append("\n\ No newline at end of file\n") |
|
703 | 703 | self.backend.writerej(self.fname, len(self.rej), self.hunks, lines) |
|
704 | 704 | |
|
705 | 705 | def apply(self, h): |
|
706 | 706 | if not h.complete(): |
|
707 | 707 | raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") % |
|
708 | 708 | (h.number, h.desc, len(h.a), h.lena, len(h.b), |
|
709 | 709 | h.lenb)) |
|
710 | 710 | |
|
711 | 711 | self.hunks += 1 |
|
712 | 712 | |
|
713 | 713 | if self.missing: |
|
714 | 714 | self.rej.append(h) |
|
715 | 715 | return -1 |
|
716 | 716 | |
|
717 | 717 | if self.exists and self.create: |
|
718 | 718 | if self.copysource: |
|
719 | 719 | self.ui.warn(_("cannot create %s: destination already " |
|
720 | 720 | "exists\n" % self.fname)) |
|
721 | 721 | else: |
|
722 | 722 | self.ui.warn(_("file %s already exists\n") % self.fname) |
|
723 | 723 | self.rej.append(h) |
|
724 | 724 | return -1 |
|
725 | 725 | |
|
726 | 726 | if isinstance(h, binhunk): |
|
727 | 727 | if self.remove: |
|
728 | 728 | self.backend.unlink(self.fname) |
|
729 | 729 | else: |
|
730 | 730 | self.lines[:] = h.new() |
|
731 | 731 | self.offset += len(h.new()) |
|
732 | 732 | self.dirty = True |
|
733 | 733 | return 0 |
|
734 | 734 | |
|
735 | 735 | horig = h |
|
736 | 736 | if (self.eolmode in ('crlf', 'lf') |
|
737 | 737 | or self.eolmode == 'auto' and self.eol): |
|
738 | 738 | # If new eols are going to be normalized, then normalize |
|
739 | 739 | # hunk data before patching. Otherwise, preserve input |
|
740 | 740 | # line-endings. |
|
741 | 741 | h = h.getnormalized() |
|
742 | 742 | |
|
743 | 743 | # fast case first, no offsets, no fuzz |
|
744 | 744 | old, oldstart, new, newstart = h.fuzzit(0, False) |
|
745 | 745 | oldstart += self.offset |
|
746 | 746 | orig_start = oldstart |
|
747 | 747 | # if there's skew we want to emit the "(offset %d lines)" even |
|
748 | 748 | # when the hunk cleanly applies at start + skew, so skip the |
|
749 | 749 | # fast case code |
|
750 | 750 | if (self.skew == 0 and |
|
751 | 751 | diffhelpers.testhunk(old, self.lines, oldstart) == 0): |
|
752 | 752 | if self.remove: |
|
753 | 753 | self.backend.unlink(self.fname) |
|
754 | 754 | else: |
|
755 | 755 | self.lines[oldstart:oldstart + len(old)] = new |
|
756 | 756 | self.offset += len(new) - len(old) |
|
757 | 757 | self.dirty = True |
|
758 | 758 | return 0 |
|
759 | 759 | |
|
760 | 760 | # ok, we couldn't match the hunk. Lets look for offsets and fuzz it |
|
761 | 761 | self.hash = {} |
|
762 | 762 | for x, s in enumerate(self.lines): |
|
763 | 763 | self.hash.setdefault(s, []).append(x) |
|
764 | 764 | |
|
765 | 765 | for fuzzlen in xrange(3): |
|
766 | 766 | for toponly in [True, False]: |
|
767 | 767 | old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly) |
|
768 | 768 | oldstart = oldstart + self.offset + self.skew |
|
769 | 769 | oldstart = min(oldstart, len(self.lines)) |
|
770 | 770 | if old: |
|
771 | 771 | cand = self.findlines(old[0][1:], oldstart) |
|
772 | 772 | else: |
|
773 | 773 | # Only adding lines with no or fuzzed context, just |
|
774 | 774 | # take the skew in account |
|
775 | 775 | cand = [oldstart] |
|
776 | 776 | |
|
777 | 777 | for l in cand: |
|
778 | 778 | if not old or diffhelpers.testhunk(old, self.lines, l) == 0: |
|
779 | 779 | self.lines[l : l + len(old)] = new |
|
780 | 780 | self.offset += len(new) - len(old) |
|
781 | 781 | self.skew = l - orig_start |
|
782 | 782 | self.dirty = True |
|
783 | 783 | offset = l - orig_start - fuzzlen |
|
784 | 784 | if fuzzlen: |
|
785 | 785 | msg = _("Hunk #%d succeeded at %d " |
|
786 | 786 | "with fuzz %d " |
|
787 | 787 | "(offset %d lines).\n") |
|
788 | 788 | self.printfile(True) |
|
789 | 789 | self.ui.warn(msg % |
|
790 | 790 | (h.number, l + 1, fuzzlen, offset)) |
|
791 | 791 | else: |
|
792 | 792 | msg = _("Hunk #%d succeeded at %d " |
|
793 | 793 | "(offset %d lines).\n") |
|
794 | 794 | self.ui.note(msg % (h.number, l + 1, offset)) |
|
795 | 795 | return fuzzlen |
|
796 | 796 | self.printfile(True) |
|
797 | 797 | self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) |
|
798 | 798 | self.rej.append(horig) |
|
799 | 799 | return -1 |
|
800 | 800 | |
|
801 | 801 | def close(self): |
|
802 | 802 | if self.dirty: |
|
803 | 803 | self.writelines(self.fname, self.lines, self.mode) |
|
804 | 804 | self.write_rej() |
|
805 | 805 | return len(self.rej) |
|
806 | 806 | |
|
807 | 807 | class hunk(object): |
|
808 | 808 | def __init__(self, desc, num, lr, context): |
|
809 | 809 | self.number = num |
|
810 | 810 | self.desc = desc |
|
811 | 811 | self.hunk = [desc] |
|
812 | 812 | self.a = [] |
|
813 | 813 | self.b = [] |
|
814 | 814 | self.starta = self.lena = None |
|
815 | 815 | self.startb = self.lenb = None |
|
816 | 816 | if lr is not None: |
|
817 | 817 | if context: |
|
818 | 818 | self.read_context_hunk(lr) |
|
819 | 819 | else: |
|
820 | 820 | self.read_unified_hunk(lr) |
|
821 | 821 | |
|
822 | 822 | def getnormalized(self): |
|
823 | 823 | """Return a copy with line endings normalized to LF.""" |
|
824 | 824 | |
|
825 | 825 | def normalize(lines): |
|
826 | 826 | nlines = [] |
|
827 | 827 | for line in lines: |
|
828 | 828 | if line.endswith('\r\n'): |
|
829 | 829 | line = line[:-2] + '\n' |
|
830 | 830 | nlines.append(line) |
|
831 | 831 | return nlines |
|
832 | 832 | |
|
833 | 833 | # Dummy object; it is rebuilt manually |
|
834 | 834 | nh = hunk(self.desc, self.number, None, None) |
|
835 | 835 | nh.number = self.number |
|
836 | 836 | nh.desc = self.desc |
|
837 | 837 | nh.hunk = self.hunk |
|
838 | 838 | nh.a = normalize(self.a) |
|
839 | 839 | nh.b = normalize(self.b) |
|
840 | 840 | nh.starta = self.starta |
|
841 | 841 | nh.startb = self.startb |
|
842 | 842 | nh.lena = self.lena |
|
843 | 843 | nh.lenb = self.lenb |
|
844 | 844 | return nh |
|
845 | 845 | |
|
846 | 846 | def read_unified_hunk(self, lr): |
|
847 | 847 | m = unidesc.match(self.desc) |
|
848 | 848 | if not m: |
|
849 | 849 | raise PatchError(_("bad hunk #%d") % self.number) |
|
850 | 850 | self.starta, self.lena, self.startb, self.lenb = m.groups() |
|
851 | 851 | if self.lena is None: |
|
852 | 852 | self.lena = 1 |
|
853 | 853 | else: |
|
854 | 854 | self.lena = int(self.lena) |
|
855 | 855 | if self.lenb is None: |
|
856 | 856 | self.lenb = 1 |
|
857 | 857 | else: |
|
858 | 858 | self.lenb = int(self.lenb) |
|
859 | 859 | self.starta = int(self.starta) |
|
860 | 860 | self.startb = int(self.startb) |
|
861 | 861 | diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, |
|
862 | 862 | self.b) |
|
863 | 863 | # if we hit eof before finishing out the hunk, the last line will |
|
864 | 864 | # be zero length. Let's try to fix it up. |
|
865 | 865 | while len(self.hunk[-1]) == 0: |
|
866 | 866 | del self.hunk[-1] |
|
867 | 867 | del self.a[-1] |
|
868 | 868 | del self.b[-1] |
|
869 | 869 | self.lena -= 1 |
|
870 | 870 | self.lenb -= 1 |
|
871 | 871 | self._fixnewline(lr) |
|
872 | 872 | |
|
873 | 873 | def read_context_hunk(self, lr): |
|
874 | 874 | self.desc = lr.readline() |
|
875 | 875 | m = contextdesc.match(self.desc) |
|
876 | 876 | if not m: |
|
877 | 877 | raise PatchError(_("bad hunk #%d") % self.number) |
|
878 | 878 | self.starta, aend = m.groups() |
|
879 | 879 | self.starta = int(self.starta) |
|
880 | 880 | if aend is None: |
|
881 | 881 | aend = self.starta |
|
882 | 882 | self.lena = int(aend) - self.starta |
|
883 | 883 | if self.starta: |
|
884 | 884 | self.lena += 1 |
|
885 | 885 | for x in xrange(self.lena): |
|
886 | 886 | l = lr.readline() |
|
887 | 887 | if l.startswith('---'): |
|
888 | 888 | # lines addition, old block is empty |
|
889 | 889 | lr.push(l) |
|
890 | 890 | break |
|
891 | 891 | s = l[2:] |
|
892 | 892 | if l.startswith('- ') or l.startswith('! '): |
|
893 | 893 | u = '-' + s |
|
894 | 894 | elif l.startswith(' '): |
|
895 | 895 | u = ' ' + s |
|
896 | 896 | else: |
|
897 | 897 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
898 | 898 | (self.number, x)) |
|
899 | 899 | self.a.append(u) |
|
900 | 900 | self.hunk.append(u) |
|
901 | 901 | |
|
902 | 902 | l = lr.readline() |
|
903 | 903 | if l.startswith('\ '): |
|
904 | 904 | s = self.a[-1][:-1] |
|
905 | 905 | self.a[-1] = s |
|
906 | 906 | self.hunk[-1] = s |
|
907 | 907 | l = lr.readline() |
|
908 | 908 | m = contextdesc.match(l) |
|
909 | 909 | if not m: |
|
910 | 910 | raise PatchError(_("bad hunk #%d") % self.number) |
|
911 | 911 | self.startb, bend = m.groups() |
|
912 | 912 | self.startb = int(self.startb) |
|
913 | 913 | if bend is None: |
|
914 | 914 | bend = self.startb |
|
915 | 915 | self.lenb = int(bend) - self.startb |
|
916 | 916 | if self.startb: |
|
917 | 917 | self.lenb += 1 |
|
918 | 918 | hunki = 1 |
|
919 | 919 | for x in xrange(self.lenb): |
|
920 | 920 | l = lr.readline() |
|
921 | 921 | if l.startswith('\ '): |
|
922 | 922 | # XXX: the only way to hit this is with an invalid line range. |
|
923 | 923 | # The no-eol marker is not counted in the line range, but I |
|
924 | 924 | # guess there are diff(1) implementations out there which behave differently. |
|
925 | 925 | s = self.b[-1][:-1] |
|
926 | 926 | self.b[-1] = s |
|
927 | 927 | self.hunk[hunki - 1] = s |
|
928 | 928 | continue |
|
929 | 929 | if not l: |
|
930 | 930 | # line deletions, new block is empty and we hit EOF |
|
931 | 931 | lr.push(l) |
|
932 | 932 | break |
|
933 | 933 | s = l[2:] |
|
934 | 934 | if l.startswith('+ ') or l.startswith('! '): |
|
935 | 935 | u = '+' + s |
|
936 | 936 | elif l.startswith(' '): |
|
937 | 937 | u = ' ' + s |
|
938 | 938 | elif len(self.b) == 0: |
|
939 | 939 | # line deletions, new block is empty |
|
940 | 940 | lr.push(l) |
|
941 | 941 | break |
|
942 | 942 | else: |
|
943 | 943 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
944 | 944 | (self.number, x)) |
|
945 | 945 | self.b.append(s) |
|
946 | 946 | while True: |
|
947 | 947 | if hunki >= len(self.hunk): |
|
948 | 948 | h = "" |
|
949 | 949 | else: |
|
950 | 950 | h = self.hunk[hunki] |
|
951 | 951 | hunki += 1 |
|
952 | 952 | if h == u: |
|
953 | 953 | break |
|
954 | 954 | elif h.startswith('-'): |
|
955 | 955 | continue |
|
956 | 956 | else: |
|
957 | 957 | self.hunk.insert(hunki - 1, u) |
|
958 | 958 | break |
|
959 | 959 | |
|
960 | 960 | if not self.a: |
|
961 | 961 | # this happens when lines were only added to the hunk |
|
962 | 962 | for x in self.hunk: |
|
963 | 963 | if x.startswith('-') or x.startswith(' '): |
|
964 | 964 | self.a.append(x) |
|
965 | 965 | if not self.b: |
|
966 | 966 | # this happens when lines were only deleted from the hunk |
|
967 | 967 | for x in self.hunk: |
|
968 | 968 | if x.startswith('+') or x.startswith(' '): |
|
969 | 969 | self.b.append(x[1:]) |
|
970 | 970 | # @@ -start,len +start,len @@ |
|
971 | 971 | self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, |
|
972 | 972 | self.startb, self.lenb) |
|
973 | 973 | self.hunk[0] = self.desc |
|
974 | 974 | self._fixnewline(lr) |
|
975 | 975 | |
|
976 | 976 | def _fixnewline(self, lr): |
|
977 | 977 | l = lr.readline() |
|
978 | 978 | if l.startswith('\ '): |
|
979 | 979 | diffhelpers.fix_newline(self.hunk, self.a, self.b) |
|
980 | 980 | else: |
|
981 | 981 | lr.push(l) |
|
982 | 982 | |
|
983 | 983 | def complete(self): |
|
984 | 984 | return len(self.a) == self.lena and len(self.b) == self.lenb |
|
985 | 985 | |
|
986 | 986 | def _fuzzit(self, old, new, fuzz, toponly): |
|
987 | 987 | # this removes context lines from the top and bottom of the hunk. It |
|
988 | 988 | # checks the hunk to make sure only context lines are removed, and then |
|
989 | 989 | # returns a new shortened list of lines. |
|
990 | 990 | fuzz = min(fuzz, len(old)) |
|
991 | 991 | if fuzz: |
|
992 | 992 | top = 0 |
|
993 | 993 | bot = 0 |
|
994 | 994 | hlen = len(self.hunk) |
|
995 | 995 | for x in xrange(hlen - 1): |
|
996 | 996 | # the hunk starts with the @@ line, so use x+1 |
|
997 | 997 | if self.hunk[x + 1][0] == ' ': |
|
998 | 998 | top += 1 |
|
999 | 999 | else: |
|
1000 | 1000 | break |
|
1001 | 1001 | if not toponly: |
|
1002 | 1002 | for x in xrange(hlen - 1): |
|
1003 | 1003 | if self.hunk[hlen - bot - 1][0] == ' ': |
|
1004 | 1004 | bot += 1 |
|
1005 | 1005 | else: |
|
1006 | 1006 | break |
|
1007 | 1007 | |
|
1008 | 1008 | bot = min(fuzz, bot) |
|
1009 | 1009 | top = min(fuzz, top) |
|
1010 | 1010 | return old[top:len(old)-bot], new[top:len(new)-bot], top |
|
1011 | 1011 | return old, new, 0 |
|
1012 | 1012 | |
|
1013 | 1013 | def fuzzit(self, fuzz, toponly): |
|
1014 | 1014 | old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly) |
|
1015 | 1015 | oldstart = self.starta + top |
|
1016 | 1016 | newstart = self.startb + top |
|
1017 | 1017 | # zero length hunk ranges already have their start decremented |
|
1018 | 1018 | if self.lena and oldstart > 0: |
|
1019 | 1019 | oldstart -= 1 |
|
1020 | 1020 | if self.lenb and newstart > 0: |
|
1021 | 1021 | newstart -= 1 |
|
1022 | 1022 | return old, oldstart, new, newstart |
|
1023 | 1023 | |
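
The fuzzing above mirrors patch(1): when a hunk does not apply cleanly, up to two context lines are shaved off its edges (top only first, then both ends) and the match is retried, with apply() reporting the fuzz that was needed. A minimal standalone sketch of the trimming idea; trimcontext is a hypothetical helper, not part of this module:

    def trimcontext(hunklines, fuzz, toponly=False):
        # count the runs of leading/trailing context (' ') lines, then
        # drop at most `fuzz` of them from each end; '+'/'-' lines are
        # never dropped, which is what _fuzzit() above guarantees too
        top = 0
        for line in hunklines:
            if not line.startswith(' '):
                break
            top += 1
        bot = 0
        if not toponly:
            for line in reversed(hunklines):
                if not line.startswith(' '):
                    break
                bot += 1
        top, bot = min(fuzz, top), min(fuzz, bot)
        return hunklines[top:len(hunklines) - bot]

    assert trimcontext([' a', '-x', '+y', ' b'], 1) == ['-x', '+y']
    assert trimcontext([' a', '-x', '+y', ' b'], 1, toponly=True) == ['-x', '+y', ' b']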
|
1024 | 1024 | class binhunk(object): |
|
1025 | 1025 | 'A binary patch file. Only understands literals so far.' |
|
1026 | 1026 | def __init__(self, lr, fname): |
|
1027 | 1027 | self.text = None |
|
1028 | 1028 | self.hunk = ['GIT binary patch\n'] |
|
1029 | 1029 | self._fname = fname |
|
1030 | 1030 | self._read(lr) |
|
1031 | 1031 | |
|
1032 | 1032 | def complete(self): |
|
1033 | 1033 | return self.text is not None |
|
1034 | 1034 | |
|
1035 | 1035 | def new(self): |
|
1036 | 1036 | return [self.text] |
|
1037 | 1037 | |
|
1038 | 1038 | def _read(self, lr): |
|
1039 | 1039 | def getline(lr, hunk): |
|
1040 | 1040 | l = lr.readline() |
|
1041 | 1041 | hunk.append(l) |
|
1042 | 1042 | return l.rstrip('\r\n') |
|
1043 | 1043 | |
|
1044 | 1044 | while True: |
|
1045 | 1045 | line = getline(lr, self.hunk) |
|
1046 | 1046 | if not line: |
|
1047 | 1047 | raise PatchError(_('could not extract "%s" binary data') |
|
1048 | 1048 | % self._fname) |
|
1049 | 1049 | if line.startswith('literal '): |
|
1050 | 1050 | break |
|
1051 | 1051 | size = int(line[8:].rstrip()) |
|
1052 | 1052 | dec = [] |
|
1053 | 1053 | line = getline(lr, self.hunk) |
|
1054 | 1054 | while len(line) > 1: |
|
1055 | 1055 | l = line[0] |
|
1056 | 1056 | if l <= 'Z' and l >= 'A': |
|
1057 | 1057 | l = ord(l) - ord('A') + 1 |
|
1058 | 1058 | else: |
|
1059 | 1059 | l = ord(l) - ord('a') + 27 |
|
1060 | 1060 | try: |
|
1061 | 1061 | dec.append(base85.b85decode(line[1:])[:l]) |
|
1062 | 1062 | except ValueError, e: |
|
1063 | 1063 | raise PatchError(_('could not decode "%s" binary patch: %s') |
|
1064 | 1064 | % (self._fname, str(e))) |
|
1065 | 1065 | line = getline(lr, self.hunk) |
|
1066 | 1066 | text = zlib.decompress(''.join(dec)) |
|
1067 | 1067 | if len(text) != size: |
|
1068 | 1068 | raise PatchError(_('"%s" length is %d bytes, should be %d') |
|
1069 | 1069 | % (self._fname, len(text), size)) |
|
1070 | 1070 | self.text = text |
|
1071 | 1071 | |
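
For reference, the per-line length prefix decoded in _read above follows git's binary-patch convention: base85 always emits five characters per four bytes, so the true payload length of each line rides in its first character. A small sketch of just that mapping (declen is a hypothetical helper name):

    def declen(c):
        # 'A'..'Z' encode payload lengths 1..26, 'a'..'z' encode 27..52
        if 'A' <= c <= 'Z':
            return ord(c) - ord('A') + 1
        return ord(c) - ord('a') + 27

    assert declen('A') == 1 and declen('Z') == 26
    assert declen('a') == 27 and declen('z') == 52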
|
1072 | 1072 | def parsefilename(str): |
|
1073 | 1073 | # --- filename \t|space stuff |
|
1074 | 1074 | s = str[4:].rstrip('\r\n') |
|
1075 | 1075 | i = s.find('\t') |
|
1076 | 1076 | if i < 0: |
|
1077 | 1077 | i = s.find(' ') |
|
1078 | 1078 | if i < 0: |
|
1079 | 1079 | return s |
|
1080 | 1080 | return s[:i] |
|
1081 | 1081 | |
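
Illustrative inputs for parsefilename(); the '--- '/'+++ ' prefix is exactly four characters, and everything from the first tab or space on (usually a timestamp) is dropped. The sample values are made up:

    assert parsefilename('--- a/foo.c\t2011-01-01 10:00:00\n') == 'a/foo.c'
    assert parsefilename('+++ b/foo.c\n') == 'b/foo.c'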
|
1082 | 1082 | def pathstrip(path, strip): |
|
1083 | 1083 | pathlen = len(path) |
|
1084 | 1084 | i = 0 |
|
1085 | 1085 | if strip == 0: |
|
1086 | 1086 | return '', path.rstrip() |
|
1087 | 1087 | count = strip |
|
1088 | 1088 | while count > 0: |
|
1089 | 1089 | i = path.find('/', i) |
|
1090 | 1090 | if i == -1: |
|
1091 | 1091 | raise PatchError(_("unable to strip away %d of %d dirs from %s") % |
|
1092 | 1092 | (count, strip, path)) |
|
1093 | 1093 | i += 1 |
|
1094 | 1094 | # consume '//' in the path |
|
1095 | 1095 | while i < pathlen - 1 and path[i] == '/': |
|
1096 | 1096 | i += 1 |
|
1097 | 1097 | count -= 1 |
|
1098 | 1098 | return path[:i].lstrip(), path[i:].rstrip() |
|
1099 | 1099 | |
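
pathstrip() behaves like patch(1)'s -p option, returning both the stripped prefix and the remainder. Illustrative values, assuming a git-style 'a/' prefix:

    assert pathstrip('a/b/c.txt', 0) == ('', 'a/b/c.txt')
    assert pathstrip('a/b/c.txt', 1) == ('a/', 'b/c.txt')
    assert pathstrip('a/b/c.txt', 2) == ('a/b/', 'c.txt')
    # stripping more components than the path has raises PatchError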
|
1100 | 1100 | def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip): |
|
1101 | 1101 | nulla = afile_orig == "/dev/null" |
|
1102 | 1102 | nullb = bfile_orig == "/dev/null" |
|
1103 | 1103 | create = nulla and hunk.starta == 0 and hunk.lena == 0 |
|
1104 | 1104 | remove = nullb and hunk.startb == 0 and hunk.lenb == 0 |
|
1105 | 1105 | abase, afile = pathstrip(afile_orig, strip) |
|
1106 | 1106 | gooda = not nulla and backend.exists(afile) |
|
1107 | 1107 | bbase, bfile = pathstrip(bfile_orig, strip) |
|
1108 | 1108 | if afile == bfile: |
|
1109 | 1109 | goodb = gooda |
|
1110 | 1110 | else: |
|
1111 | 1111 | goodb = not nullb and backend.exists(bfile) |
|
1112 | 1112 | missing = not goodb and not gooda and not create |
|
1113 | 1113 | |
|
1114 | 1114 | # some diff programs apparently produce patches where the afile is |
|
1115 | 1115 | # not /dev/null, but afile starts with bfile |
|
1116 | 1116 | abasedir = afile[:afile.rfind('/') + 1] |
|
1117 | 1117 | bbasedir = bfile[:bfile.rfind('/') + 1] |
|
1118 | 1118 | if (missing and abasedir == bbasedir and afile.startswith(bfile) |
|
1119 | 1119 | and hunk.starta == 0 and hunk.lena == 0): |
|
1120 | 1120 | create = True |
|
1121 | 1121 | missing = False |
|
1122 | 1122 | |
|
1123 | 1123 | # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the |
|
1124 | 1124 | # diff is between a file and its backup. In this case, the original |
|
1125 | 1125 | # file should be patched (see original mpatch code). |
|
1126 | 1126 | isbackup = (abase == bbase and bfile.startswith(afile)) |
|
1127 | 1127 | fname = None |
|
1128 | 1128 | if not missing: |
|
1129 | 1129 | if gooda and goodb: |
|
1130 | 1130 | fname = isbackup and afile or bfile |
|
1131 | 1131 | elif gooda: |
|
1132 | 1132 | fname = afile |
|
1133 | 1133 | |
|
1134 | 1134 | if not fname: |
|
1135 | 1135 | if not nullb: |
|
1136 | 1136 | fname = isbackup and afile or bfile |
|
1137 | 1137 | elif not nulla: |
|
1138 | 1138 | fname = afile |
|
1139 | 1139 | else: |
|
1140 | 1140 | raise PatchError(_("undefined source and destination files")) |
|
1141 | 1141 | |
|
1142 | 1142 | gp = patchmeta(fname) |
|
1143 | 1143 | if create: |
|
1144 | 1144 | gp.op = 'ADD' |
|
1145 | 1145 | elif remove: |
|
1146 | 1146 | gp.op = 'DELETE' |
|
1147 | 1147 | return gp |
|
1148 | 1148 | |
|
1149 | 1149 | def scangitpatch(lr, firstline): |
|
1150 | 1150 | """ |
|
1151 | 1151 | Git patches can emit: |
|
1152 | 1152 | - rename a to b |
|
1153 | 1153 | - change b |
|
1154 | 1154 | - copy a to c |
|
1155 | 1155 | - change c |
|
1156 | 1156 | |
|
1157 | 1157 | We cannot apply this sequence as-is: the renamed 'a' could not be |
|
1158 | 1158 | found, since it would have been renamed already. And we cannot copy |
|
1159 | 1159 | from 'b' instead because 'b' would have been changed already. So |
|
1160 | 1160 | we scan the git patch for copy and rename commands so we can |
|
1161 | 1161 | perform the copies ahead of time. |
|
1162 | 1162 | """ |
|
1163 | 1163 | pos = 0 |
|
1164 | 1164 | try: |
|
1165 | 1165 | pos = lr.fp.tell() |
|
1166 | 1166 | fp = lr.fp |
|
1167 | 1167 | except IOError: |
|
1168 | 1168 | fp = cStringIO.StringIO(lr.fp.read()) |
|
1169 | 1169 | gitlr = linereader(fp) |
|
1170 | 1170 | gitlr.push(firstline) |
|
1171 | 1171 | gitpatches = readgitpatch(gitlr) |
|
1172 | 1172 | fp.seek(pos) |
|
1173 | 1173 | return gitpatches |
|
1174 | 1174 | |
|
1175 | 1175 | def iterhunks(fp): |
|
1176 | 1176 | """Read a patch and yield the following events: |
|
1177 | 1177 | - ("file", afile, bfile, firsthunk): select a new target file. |
|
1178 | 1178 | - ("hunk", hunk): a new hunk is ready to be applied, follows a |
|
1179 | 1179 | "file" event. |
|
1180 | 1180 | - ("git", gitchanges): current diff is in git format, gitchanges |
|
1181 | 1181 | maps filenames to gitpatch records. Unique event. |
|
1182 | 1182 | """ |
|
1183 | 1183 | afile = "" |
|
1184 | 1184 | bfile = "" |
|
1185 | 1185 | state = None |
|
1186 | 1186 | hunknum = 0 |
|
1187 | 1187 | emitfile = newfile = False |
|
1188 | 1188 | gitpatches = None |
|
1189 | 1189 | |
|
1190 | 1190 | # our states |
|
1191 | 1191 | BFILE = 1 |
|
1192 | 1192 | context = None |
|
1193 | 1193 | lr = linereader(fp) |
|
1194 | 1194 | |
|
1195 | 1195 | while True: |
|
1196 | 1196 | x = lr.readline() |
|
1197 | 1197 | if not x: |
|
1198 | 1198 | break |
|
1199 | 1199 | if state == BFILE and ( |
|
1200 | 1200 | (not context and x[0] == '@') |
|
1201 | 1201 | or (context is not False and x.startswith('***************')) |
|
1202 | 1202 | or x.startswith('GIT binary patch')): |
|
1203 | 1203 | gp = None |
|
1204 | 1204 | if (gitpatches and |
|
1205 | 1205 | gitpatches[-1].ispatching(afile, bfile)): |
|
1206 | 1206 | gp = gitpatches.pop() |
|
1207 | 1207 | if x.startswith('GIT binary patch'): |
|
1208 | 1208 | h = binhunk(lr, gp.path) |
|
1209 | 1209 | else: |
|
1210 | 1210 | if context is None and x.startswith('***************'): |
|
1211 | 1211 | context = True |
|
1212 | 1212 | h = hunk(x, hunknum + 1, lr, context) |
|
1213 | 1213 | hunknum += 1 |
|
1214 | 1214 | if emitfile: |
|
1215 | 1215 | emitfile = False |
|
1216 | 1216 | yield 'file', (afile, bfile, h, gp and gp.copy() or None) |
|
1217 | 1217 | yield 'hunk', h |
|
1218 | 1218 | elif x.startswith('diff --git'): |
|
1219 | 1219 | m = gitre.match(x.rstrip(' \r\n')) |
|
1220 | 1220 | if not m: |
|
1221 | 1221 | continue |
|
1222 | 1222 | if gitpatches is None: |
|
1223 | 1223 | # scan whole input for git metadata |
|
1224 | 1224 | gitpatches = scangitpatch(lr, x) |
|
1225 | 1225 | yield 'git', [g.copy() for g in gitpatches |
|
1226 | 1226 | if g.op in ('COPY', 'RENAME')] |
|
1227 | 1227 | gitpatches.reverse() |
|
1228 | 1228 | afile = 'a/' + m.group(1) |
|
1229 | 1229 | bfile = 'b/' + m.group(2) |
|
1230 | 1230 | while gitpatches and not gitpatches[-1].ispatching(afile, bfile): |
|
1231 | 1231 | gp = gitpatches.pop() |
|
1232 | 1232 | yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
|
1233 | 1233 | if not gitpatches: |
|
1234 | 1234 | raise PatchError(_('failed to synchronize metadata for "%s"') |
|
1235 | 1235 | % afile[2:]) |
|
1236 | 1236 | gp = gitpatches[-1] |
|
1237 | 1237 | newfile = True |
|
1238 | 1238 | elif x.startswith('---'): |
|
1239 | 1239 | # check for a unified diff |
|
1240 | 1240 | l2 = lr.readline() |
|
1241 | 1241 | if not l2.startswith('+++'): |
|
1242 | 1242 | lr.push(l2) |
|
1243 | 1243 | continue |
|
1244 | 1244 | newfile = True |
|
1245 | 1245 | context = False |
|
1246 | 1246 | afile = parsefilename(x) |
|
1247 | 1247 | bfile = parsefilename(l2) |
|
1248 | 1248 | elif x.startswith('***'): |
|
1249 | 1249 | # check for a context diff |
|
1250 | 1250 | l2 = lr.readline() |
|
1251 | 1251 | if not l2.startswith('---'): |
|
1252 | 1252 | lr.push(l2) |
|
1253 | 1253 | continue |
|
1254 | 1254 | l3 = lr.readline() |
|
1255 | 1255 | lr.push(l3) |
|
1256 | 1256 | if not l3.startswith("***************"): |
|
1257 | 1257 | lr.push(l2) |
|
1258 | 1258 | continue |
|
1259 | 1259 | newfile = True |
|
1260 | 1260 | context = True |
|
1261 | 1261 | afile = parsefilename(x) |
|
1262 | 1262 | bfile = parsefilename(l2) |
|
1263 | 1263 | |
|
1264 | 1264 | if newfile: |
|
1265 | 1265 | newfile = False |
|
1266 | 1266 | emitfile = True |
|
1267 | 1267 | state = BFILE |
|
1268 | 1268 | hunknum = 0 |
|
1269 | 1269 | |
|
1270 | 1270 | while gitpatches: |
|
1271 | 1271 | gp = gitpatches.pop() |
|
1272 | 1272 | yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
|
1273 | 1273 | |
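
A minimal consumer of iterhunks(), sketched from the event contract in the docstring above; the patch text and the unpacking are illustrative only:

    import cStringIO

    sample = ('--- a/f\n'
              '+++ b/f\n'
              '@@ -1,1 +1,1 @@\n'
              '-old\n'
              '+new\n')
    for state, values in iterhunks(cStringIO.StringIO(sample)):
        if state == 'file':
            afile, bfile, firsthunk, gp = values   # here 'a/f', 'b/f', ...
        elif state == 'hunk':
            pass   # values is the hunk object just parsed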
|
1274 | 1274 | def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'): |
|
1275 | 1275 | """Reads a patch from fp and tries to apply it. |
|
1276 | 1276 | |
|
1277 | 1277 | Returns 0 for a clean patch, -1 if any rejects were found and 1 if |
|
1278 | 1278 | there was any fuzz. |
|
1279 | 1279 | |
|
1280 | 1280 | If 'eolmode' is 'strict', the patch content and patched file are |
|
1281 | 1281 | read in binary mode. Otherwise, line endings are ignored when |
|
1282 | 1282 | patching then normalized according to 'eolmode'. |
|
1283 | 1283 | """ |
|
1284 | 1284 | return _applydiff(ui, fp, patchfile, backend, store, strip=strip, |
|
1285 | 1285 | eolmode=eolmode) |
|
1286 | 1286 | |
|
1287 | 1287 | def _applydiff(ui, fp, patcher, backend, store, strip=1, |
|
1288 | 1288 | eolmode='strict'): |
|
1289 | 1289 | |
|
1290 | 1290 | def pstrip(p): |
|
1291 | 1291 | return pathstrip(p, strip - 1)[1] |
|
1292 | 1292 | |
|
1293 | 1293 | rejects = 0 |
|
1294 | 1294 | err = 0 |
|
1295 | 1295 | current_file = None |
|
1296 | 1296 | |
|
1297 | 1297 | for state, values in iterhunks(fp): |
|
1298 | 1298 | if state == 'hunk': |
|
1299 | 1299 | if not current_file: |
|
1300 | 1300 | continue |
|
1301 | 1301 | ret = current_file.apply(values) |
|
1302 | 1302 | if ret > 0: |
|
1303 | 1303 | err = 1 |
|
1304 | 1304 | elif state == 'file': |
|
1305 | 1305 | if current_file: |
|
1306 | 1306 | rejects += current_file.close() |
|
1307 | 1307 | current_file = None |
|
1308 | 1308 | afile, bfile, first_hunk, gp = values |
|
1309 | 1309 | if gp: |
|
1310 | 1310 | gp.path = pstrip(gp.path) |
|
1311 | 1311 | if gp.oldpath: |
|
1312 | 1312 | gp.oldpath = pstrip(gp.oldpath) |
|
1313 | 1313 | else: |
|
1314 | 1314 | gp = makepatchmeta(backend, afile, bfile, first_hunk, strip) |
|
1315 | 1315 | if gp.op == 'RENAME': |
|
1316 | 1316 | backend.unlink(gp.oldpath) |
|
1317 | 1317 | if not first_hunk: |
|
1318 | 1318 | if gp.op == 'DELETE': |
|
1319 | 1319 | backend.unlink(gp.path) |
|
1320 | 1320 | continue |
|
1321 | 1321 | data, mode = None, None |
|
1322 | 1322 | if gp.op in ('RENAME', 'COPY'): |
|
1323 | 1323 | data, mode = store.getfile(gp.oldpath)[:2] |
|
1324 | 1324 | if gp.mode: |
|
1325 | 1325 | mode = gp.mode |
|
1326 | 1326 | if gp.op == 'ADD': |
|
1327 | 1327 | # Added files without content have no hunk and |
|
1328 | 1328 | # must be created |
|
1329 | 1329 | data = '' |
|
1330 | 1330 | if data or mode: |
|
1331 | 1331 | if (gp.op in ('ADD', 'RENAME', 'COPY') |
|
1332 | 1332 | and backend.exists(gp.path)): |
|
1333 | 1333 | raise PatchError(_("cannot create %s: destination " |
|
1334 | 1334 | "already exists") % gp.path) |
|
1335 | 1335 | backend.setfile(gp.path, data, mode, gp.oldpath) |
|
1336 | 1336 | continue |
|
1337 | 1337 | try: |
|
1338 | 1338 | current_file = patcher(ui, gp, backend, store, |
|
1339 | 1339 | eolmode=eolmode) |
|
1340 | 1340 | except PatchError, inst: |
|
1341 | 1341 | ui.warn(str(inst) + '\n') |
|
1342 | 1342 | current_file = None |
|
1343 | 1343 | rejects += 1 |
|
1344 | 1344 | continue |
|
1345 | 1345 | elif state == 'git': |
|
1346 | 1346 | for gp in values: |
|
1347 | 1347 | path = pstrip(gp.oldpath) |
|
1348 | 1348 | data, mode = backend.getfile(path) |
|
1349 | 1349 | store.setfile(path, data, mode) |
|
1350 | 1350 | else: |
|
1351 | 1351 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1352 | 1352 | |
|
1353 | 1353 | if current_file: |
|
1354 | 1354 | rejects += current_file.close() |
|
1355 | 1355 | |
|
1356 | 1356 | if rejects: |
|
1357 | 1357 | return -1 |
|
1358 | 1358 | return err |
|
1359 | 1359 | |
|
1360 | 1360 | def _externalpatch(ui, repo, patcher, patchname, strip, files, |
|
1361 | 1361 | similarity): |
|
1362 | 1362 | """use <patcher> to apply <patchname> to the working directory. |
|
1363 | 1363 | returns whether patch was applied with fuzz factor.""" |
|
1364 | 1364 | |
|
1365 | 1365 | fuzz = False |
|
1366 | 1366 | args = [] |
|
1367 | 1367 | cwd = repo.root |
|
1368 | 1368 | if cwd: |
|
1369 | 1369 | args.append('-d %s' % util.shellquote(cwd)) |
|
1370 | 1370 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, |
|
1371 | 1371 | util.shellquote(patchname))) |
|
1372 | 1372 | try: |
|
1373 | 1373 | for line in fp: |
|
1374 | 1374 | line = line.rstrip() |
|
1375 | 1375 | ui.note(line + '\n') |
|
1376 | 1376 | if line.startswith('patching file '): |
|
1377 | 1377 | pf = util.parsepatchoutput(line) |
|
1378 | 1378 | printed_file = False |
|
1379 | 1379 | files.add(pf) |
|
1380 | 1380 | elif line.find('with fuzz') >= 0: |
|
1381 | 1381 | fuzz = True |
|
1382 | 1382 | if not printed_file: |
|
1383 | 1383 | ui.warn(pf + '\n') |
|
1384 | 1384 | printed_file = True |
|
1385 | 1385 | ui.warn(line + '\n') |
|
1386 | 1386 | elif line.find('saving rejects to file') >= 0: |
|
1387 | 1387 | ui.warn(line + '\n') |
|
1388 | 1388 | elif line.find('FAILED') >= 0: |
|
1389 | 1389 | if not printed_file: |
|
1390 | 1390 | ui.warn(pf + '\n') |
|
1391 | 1391 | printed_file = True |
|
1392 | 1392 | ui.warn(line + '\n') |
|
1393 | 1393 | finally: |
|
1394 | 1394 | if files: |
|
1395 | 1395 | cfiles = list(files) |
|
1396 | 1396 | cwd = repo.getcwd() |
|
1397 | 1397 | if cwd: |
|
1398 | 1398 | cfiles = [util.pathto(repo.root, cwd, f) |
|
1399 | 1399 | for f in cfiles] |
|
1400 | 1400 | scmutil.addremove(repo, cfiles, similarity=similarity) |
|
1401 | 1401 | code = fp.close() |
|
1402 | 1402 | if code: |
|
1403 | 1403 | raise PatchError(_("patch command failed: %s") % |
|
1404 | 1404 | util.explainexit(code)[0]) |
|
1405 | 1405 | return fuzz |
|
1406 | 1406 | |
|
1407 | 1407 | def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'): |
|
1408 | 1408 | if files is None: |
|
1409 | 1409 | files = set() |
|
1410 | 1410 | if eolmode is None: |
|
1411 | 1411 | eolmode = ui.config('patch', 'eol', 'strict') |
|
1412 | 1412 | if eolmode.lower() not in eolmodes: |
|
1413 | 1413 | raise util.Abort(_('unsupported line endings type: %s') % eolmode) |
|
1414 | 1414 | eolmode = eolmode.lower() |
|
1415 | 1415 | |
|
1416 | 1416 | store = filestore() |
|
1417 | 1417 | try: |
|
1418 | 1418 | fp = open(patchobj, 'rb') |
|
1419 | 1419 | except TypeError: |
|
1420 | 1420 | fp = patchobj |
|
1421 | 1421 | try: |
|
1422 | 1422 | ret = applydiff(ui, fp, backend, store, strip=strip, |
|
1423 | 1423 | eolmode=eolmode) |
|
1424 | 1424 | finally: |
|
1425 | 1425 | if fp != patchobj: |
|
1426 | 1426 | fp.close() |
|
1427 | 1427 | files.update(backend.close()) |
|
1428 | 1428 | store.close() |
|
1429 | 1429 | if ret < 0: |
|
1430 | 1430 | raise PatchError(_('patch failed to apply')) |
|
1431 | 1431 | return ret > 0 |
|
1432 | 1432 | |
|
1433 | 1433 | def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict', |
|
1434 | 1434 | similarity=0): |
|
1435 | 1435 | """use builtin patch to apply <patchobj> to the working directory. |
|
1436 | 1436 | returns whether patch was applied with fuzz factor.""" |
|
1437 | 1437 | backend = workingbackend(ui, repo, similarity) |
|
1438 | 1438 | return patchbackend(ui, backend, patchobj, strip, files, eolmode) |
|
1439 | 1439 | |
|
1440 | 1440 | def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None, |
|
1441 | 1441 | eolmode='strict'): |
|
1442 | 1442 | backend = repobackend(ui, repo, ctx, store) |
|
1443 | 1443 | return patchbackend(ui, backend, patchobj, strip, files, eolmode) |
|
1444 | 1444 | |
|
1445 | 1445 | def makememctx(repo, parents, text, user, date, branch, files, store, |
|
1446 | 1446 | editor=None): |
|
1447 | 1447 | def getfilectx(repo, memctx, path): |
|
1448 | 1448 | data, (islink, isexec), copied = store.getfile(path) |
|
1449 | 1449 | return context.memfilectx(path, data, islink=islink, isexec=isexec, |
|
1450 | 1450 | copied=copied) |
|
1451 | 1451 | extra = {} |
|
1452 | 1452 | if branch: |
|
1453 | 1453 | extra['branch'] = encoding.fromlocal(branch) |
|
1454 | 1454 | ctx = context.memctx(repo, parents, text, files, getfilectx, user, |
|
1455 | 1455 | date, extra) |
|
1456 | 1456 | if editor: |
|
1457 | 1457 | ctx._text = editor(repo, ctx, []) |
|
1458 | 1458 | return ctx |
|
1459 | 1459 | |
|
1460 | 1460 | def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict', |
|
1461 | 1461 | similarity=0): |
|
1462 | 1462 | """Apply <patchname> to the working directory. |
|
1463 | 1463 | |
|
1464 | 1464 | 'eolmode' specifies how end of lines should be handled. It can be: |
|
1465 | 1465 | - 'strict': inputs are read in binary mode, EOLs are preserved |
|
1466 | 1466 | - 'crlf': EOLs are ignored when patching and reset to CRLF |
|
1467 | 1467 | - 'lf': EOLs are ignored when patching and reset to LF |
|
1468 | 1468 | - None: get it from user settings, default to 'strict' |
|
1469 | 1469 | 'eolmode' is ignored when using an external patcher program. |
|
1470 | 1470 | |
|
1471 | 1471 | Returns whether patch was applied with fuzz factor. |
|
1472 | 1472 | """ |
|
1473 | 1473 | patcher = ui.config('ui', 'patch') |
|
1474 | 1474 | if files is None: |
|
1475 | 1475 | files = set() |
|
1476 | 1476 | try: |
|
1477 | 1477 | if patcher: |
|
1478 | 1478 | return _externalpatch(ui, repo, patcher, patchname, strip, |
|
1479 | 1479 | files, similarity) |
|
1480 | 1480 | return internalpatch(ui, repo, patchname, strip, files, eolmode, |
|
1481 | 1481 | similarity) |
|
1482 | 1482 | except PatchError, err: |
|
1483 | 1483 | raise util.Abort(str(err)) |
|
1484 | 1484 | |
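
A sketch of the entry point in use; ui and repo stand for a Mercurial ui and repository object, and the patch name is made up:

    files = set()
    fuzz = patch(ui, repo, 'bugfix.diff', strip=1, files=files, eolmode=None)
    if fuzz:
        ui.warn('patch applied with fuzz; double-check the result\n')
    ui.note('files touched: %s\n' % ', '.join(sorted(files)))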
|
1485 | 1485 | def changedfiles(ui, repo, patchpath, strip=1): |
|
1486 | 1486 | backend = fsbackend(ui, repo.root) |
|
1487 | 1487 | fp = open(patchpath, 'rb') |
|
1488 | 1488 | try: |
|
1489 | 1489 | changed = set() |
|
1490 | 1490 | for state, values in iterhunks(fp): |
|
1491 | 1491 | if state == 'file': |
|
1492 | 1492 | afile, bfile, first_hunk, gp = values |
|
1493 | 1493 | if gp: |
|
1494 | 1494 | gp.path = pathstrip(gp.path, strip - 1)[1] |
|
1495 | 1495 | if gp.oldpath: |
|
1496 | 1496 | gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1] |
|
1497 | 1497 | else: |
|
1498 | 1498 | gp = makepatchmeta(backend, afile, bfile, first_hunk, strip) |
|
1499 | 1499 | changed.add(gp.path) |
|
1500 | 1500 | if gp.op == 'RENAME': |
|
1501 | 1501 | changed.add(gp.oldpath) |
|
1502 | 1502 | elif state not in ('hunk', 'git'): |
|
1503 | 1503 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1504 | 1504 | return changed |
|
1505 | 1505 | finally: |
|
1506 | 1506 | fp.close() |
|
1507 | 1507 | |
|
1508 | 1508 | def b85diff(to, tn): |
|
1509 | 1509 | '''print base85-encoded binary diff''' |
|
1510 | 1510 | def gitindex(text): |
|
1511 | 1511 | if not text: |
|
1512 | 1512 | return hex(nullid) |
|
1513 | 1513 | l = len(text) |
|
1514 | 1514 | s = util.sha1('blob %d\0' % l) |
|
1515 | 1515 | s.update(text) |
|
1516 | 1516 | return s.hexdigest() |
|
1517 | 1517 | |
|
1518 | 1518 | def fmtline(line): |
|
1519 | 1519 | l = len(line) |
|
1520 | 1520 | if l <= 26: |
|
1521 | 1521 | l = chr(ord('A') + l - 1) |
|
1522 | 1522 | else: |
|
1523 | 1523 | l = chr(l - 26 + ord('a') - 1) |
|
1524 | 1524 | return '%c%s\n' % (l, base85.b85encode(line, True)) |
|
1525 | 1525 | |
|
1526 | 1526 | def chunk(text, csize=52): |
|
1527 | 1527 | l = len(text) |
|
1528 | 1528 | i = 0 |
|
1529 | 1529 | while i < l: |
|
1530 | 1530 | yield text[i:i + csize] |
|
1531 | 1531 | i += csize |
|
1532 | 1532 | |
|
1533 | 1533 | tohash = gitindex(to) |
|
1534 | 1534 | tnhash = gitindex(tn) |
|
1535 | 1535 | if tohash == tnhash: |
|
1536 | 1536 | return "" |
|
1537 | 1537 | |
|
1538 | 1538 | # TODO: deltas |
|
1539 | 1539 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % |
|
1540 | 1540 | (tohash, tnhash, len(tn))] |
|
1541 | 1541 | for l in chunk(zlib.compress(tn)): |
|
1542 | 1542 | ret.append(fmtline(l)) |
|
1543 | 1543 | ret.append('\n') |
|
1544 | 1544 | return ''.join(ret) |
|
1545 | 1545 | |
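
gitindex() above hashes content the same way git hashes a blob (sha1 over 'blob <len>\0' plus the data), so the index line matches what git itself would compute. A standalone check with hashlib; gitblobid is a hypothetical helper:

    import hashlib

    def gitblobid(text):
        s = hashlib.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    # the well-known git blob id of "hello\n"
    assert gitblobid('hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a'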
|
1546 | 1546 | class GitDiffRequired(Exception): |
|
1547 | 1547 | pass |
|
1548 | 1548 | |
|
1549 | 1549 | def diffopts(ui, opts=None, untrusted=False, section='diff'): |
|
1550 | 1550 | def get(key, name=None, getter=ui.configbool): |
|
1551 | 1551 | return ((opts and opts.get(key)) or |
|
1552 | 1552 | getter(section, name or key, None, untrusted=untrusted)) |
|
1553 | 1553 | return mdiff.diffopts( |
|
1554 | 1554 | text=opts and opts.get('text'), |
|
1555 | 1555 | git=get('git'), |
|
1556 | 1556 | nodates=get('nodates'), |
|
1557 | 1557 | showfunc=get('show_function', 'showfunc'), |
|
1558 | 1558 | ignorews=get('ignore_all_space', 'ignorews'), |
|
1559 | 1559 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), |
|
1560 | 1560 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), |
|
1561 | 1561 | context=get('unified', getter=ui.config)) |
|
1562 | 1562 | |
|
1563 | 1563 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, |
|
1564 | 1564 | losedatafn=None, prefix=''): |
|
1565 | 1565 | '''yields diff of changes to files between two nodes, or node and |
|
1566 | 1566 | working directory. |
|
1567 | 1567 | |
|
1568 | 1568 | if node1 is None, use first dirstate parent instead. |
|
1569 | 1569 | if node2 is None, compare node1 with working directory. |
|
1570 | 1570 | |
|
1571 | 1571 | losedatafn(**kwarg) is a callable run when opts.upgrade=True and |
|
1572 | 1572 | every time some change cannot be represented with the current |
|
1573 | 1573 | patch format. Return False to upgrade to git patch format, True to |
|
1574 | 1574 | accept the loss or raise an exception to abort the diff. It is |
|
1575 | 1575 | called with the name of current file being diffed as 'fn'. If set |
|
1576 | 1576 | to None, patches will always be upgraded to git format when |
|
1577 | 1577 | necessary. |
|
1578 | 1578 | |
|
1579 | 1579 | prefix is a filename prefix that is prepended to all filenames on |
|
1580 | 1580 | display (used for subrepos). |
|
1581 | 1581 | ''' |
|
1582 | 1582 | |
|
1583 | 1583 | if opts is None: |
|
1584 | 1584 | opts = mdiff.defaultopts |
|
1585 | 1585 | |
|
1586 | 1586 | if not node1 and not node2: |
|
1587 | 1587 | node1 = repo.dirstate.p1() |
|
1588 | 1588 | |
|
1589 | 1589 | def lrugetfilectx(): |
|
1590 | 1590 | cache = {} |
|
1591 | 1591 | order = [] |
|
1592 | 1592 | def getfilectx(f, ctx): |
|
1593 | 1593 | fctx = ctx.filectx(f, filelog=cache.get(f)) |
|
1594 | 1594 | if f not in cache: |
|
1595 | 1595 | if len(cache) > 20: |
|
1596 | 1596 | del cache[order.pop(0)] |
|
1597 | 1597 | cache[f] = fctx.filelog() |
|
1598 | 1598 | else: |
|
1599 | 1599 | order.remove(f) |
|
1600 | 1600 | order.append(f) |
|
1601 | 1601 | return fctx |
|
1602 | 1602 | return getfilectx |
|
1603 | 1603 | getfilectx = lrugetfilectx() |
|
1604 | 1604 | |
|
1605 | 1605 | ctx1 = repo[node1] |
|
1606 | 1606 | ctx2 = repo[node2] |
|
1607 | 1607 | |
|
1608 | 1608 | if not changes: |
|
1609 | 1609 | changes = repo.status(ctx1, ctx2, match=match) |
|
1610 | 1610 | modified, added, removed = changes[:3] |
|
1611 | 1611 | |
|
1612 | 1612 | if not modified and not added and not removed: |
|
1613 | 1613 | return [] |
|
1614 | 1614 | |
|
1615 | 1615 | revs = None |
|
1616 | 1616 | if not repo.ui.quiet: |
|
1617 | 1617 | hexfunc = repo.ui.debugflag and hex or short |
|
1618 | 1618 | revs = [hexfunc(node) for node in [node1, node2] if node] |
|
1619 | 1619 | |
|
1620 | 1620 | copy = {} |
|
1621 | 1621 | if opts.git or opts.upgrade: |
|
1622 | 1622 | copy = copies.pathcopies(ctx1, ctx2) |
|
1623 | 1623 | |
|
1624 | 1624 | difffn = (lambda opts, losedata: |
|
1625 | 1625 | trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1626 | 1626 | copy, getfilectx, opts, losedata, prefix)) |
|
1627 | 1627 | if opts.upgrade and not opts.git: |
|
1628 | 1628 | try: |
|
1629 | 1629 | def losedata(fn): |
|
1630 | 1630 | if not losedatafn or not losedatafn(fn=fn): |
|
1631 | 1631 | raise GitDiffRequired() |
|
1632 | 1632 | # Buffer the whole output until we are sure it can be generated |
|
1633 | 1633 | return list(difffn(opts.copy(git=False), losedata)) |
|
1634 | 1634 | except GitDiffRequired: |
|
1635 | 1635 | return difffn(opts.copy(git=True), None) |
|
1636 | 1636 | else: |
|
1637 | 1637 | return difffn(opts, None) |
|
1638 | 1638 | |
|
1639 | 1639 | def difflabel(func, *args, **kw): |
|
1640 | 1640 | '''yields 2-tuples of (output, label) based on the output of func()''' |
|
1641 | 1641 | headprefixes = [('diff', 'diff.diffline'), |
|
1642 | 1642 | ('copy', 'diff.extended'), |
|
1643 | 1643 | ('rename', 'diff.extended'), |
|
1644 | 1644 | ('old', 'diff.extended'), |
|
1645 | 1645 | ('new', 'diff.extended'), |
|
1646 | 1646 | ('deleted', 'diff.extended'), |
|
1647 | 1647 | ('---', 'diff.file_a'), |
|
1648 | 1648 | ('+++', 'diff.file_b')] |
|
1649 | 1649 | textprefixes = [('@', 'diff.hunk'), |
|
1650 | 1650 | ('-', 'diff.deleted'), |
|
1651 | 1651 | ('+', 'diff.inserted')] |
|
1652 | 1652 | head = False |
|
1653 | 1653 | for chunk in func(*args, **kw): |
|
1654 | 1654 | lines = chunk.split('\n') |
|
1655 | 1655 | for i, line in enumerate(lines): |
|
1656 | 1656 | if i != 0: |
|
1657 | 1657 | yield ('\n', '') |
|
1658 | 1658 | if head: |
|
1659 | 1659 | if line.startswith('@'): |
|
1660 | 1660 | head = False |
|
1661 | 1661 | else: |
|
1662 | | if line and not line[0] in ' +-@\\': |

| 1662 | if line and line[0] not in ' +-@\\': |
|
1663 | 1663 | head = True |
|
1664 | 1664 | stripline = line |
|
1665 | 1665 | if not head and line and line[0] in '+-': |
|
1666 | 1666 | # highlight trailing whitespace, but only in changed lines |
|
1667 | 1667 | stripline = line.rstrip() |
|
1668 | 1668 | prefixes = textprefixes |
|
1669 | 1669 | if head: |
|
1670 | 1670 | prefixes = headprefixes |
|
1671 | 1671 | for prefix, label in prefixes: |
|
1672 | 1672 | if stripline.startswith(prefix): |
|
1673 | 1673 | yield (stripline, label) |
|
1674 | 1674 | break |
|
1675 | 1675 | else: |
|
1676 | 1676 | yield (line, '') |
|
1677 | 1677 | if line != stripline: |
|
1678 | 1678 | yield (line[len(stripline):], 'diff.trailingwhitespace') |
|
1679 | 1679 | |
|
1680 | 1680 | def diffui(*args, **kw): |
|
1681 | 1681 | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' |
|
1682 | 1682 | return difflabel(diff, *args, **kw) |
|
1683 | 1683 | |
|
1684 | 1684 | |
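
A hedged sketch of consuming the labeled stream; passing the label through to ui.write is how color-aware front ends are expected to use it:

    for output, label in diffui(repo, node1, node2):
        ui.write(output, label=label)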
|
1685 | 1685 | def _addmodehdr(header, omode, nmode): |
|
1686 | 1686 | if omode != nmode: |
|
1687 | 1687 | header.append('old mode %s\n' % omode) |
|
1688 | 1688 | header.append('new mode %s\n' % nmode) |
|
1689 | 1689 | |
|
1690 | 1690 | def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, |
|
1691 | 1691 | copy, getfilectx, opts, losedatafn, prefix): |
|
1692 | 1692 | |
|
1693 | 1693 | def join(f): |
|
1694 | 1694 | return os.path.join(prefix, f) |
|
1695 | 1695 | |
|
1696 | 1696 | date1 = util.datestr(ctx1.date()) |
|
1697 | 1697 | man1 = ctx1.manifest() |
|
1698 | 1698 | |
|
1699 | 1699 | gone = set() |
|
1700 | 1700 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} |
|
1701 | 1701 | |
|
1702 | 1702 | copyto = dict([(v, k) for k, v in copy.items()]) |
|
1703 | 1703 | |
|
1704 | 1704 | if opts.git: |
|
1705 | 1705 | revs = None |
|
1706 | 1706 | |
|
1707 | 1707 | for f in sorted(modified + added + removed): |
|
1708 | 1708 | to = None |
|
1709 | 1709 | tn = None |
|
1710 | 1710 | dodiff = True |
|
1711 | 1711 | header = [] |
|
1712 | 1712 | if f in man1: |
|
1713 | 1713 | to = getfilectx(f, ctx1).data() |
|
1714 | 1714 | if f not in removed: |
|
1715 | 1715 | tn = getfilectx(f, ctx2).data() |
|
1716 | 1716 | a, b = f, f |
|
1717 | 1717 | if opts.git or losedatafn: |
|
1718 | 1718 | if f in added: |
|
1719 | 1719 | mode = gitmode[ctx2.flags(f)] |
|
1720 | 1720 | if f in copy or f in copyto: |
|
1721 | 1721 | if opts.git: |
|
1722 | 1722 | if f in copy: |
|
1723 | 1723 | a = copy[f] |
|
1724 | 1724 | else: |
|
1725 | 1725 | a = copyto[f] |
|
1726 | 1726 | omode = gitmode[man1.flags(a)] |
|
1727 | 1727 | _addmodehdr(header, omode, mode) |
|
1728 | 1728 | if a in removed and a not in gone: |
|
1729 | 1729 | op = 'rename' |
|
1730 | 1730 | gone.add(a) |
|
1731 | 1731 | else: |
|
1732 | 1732 | op = 'copy' |
|
1733 | 1733 | header.append('%s from %s\n' % (op, join(a))) |
|
1734 | 1734 | header.append('%s to %s\n' % (op, join(f))) |
|
1735 | 1735 | to = getfilectx(a, ctx1).data() |
|
1736 | 1736 | else: |
|
1737 | 1737 | losedatafn(f) |
|
1738 | 1738 | else: |
|
1739 | 1739 | if opts.git: |
|
1740 | 1740 | header.append('new file mode %s\n' % mode) |
|
1741 | 1741 | elif ctx2.flags(f): |
|
1742 | 1742 | losedatafn(f) |
|
1743 | 1743 | # In theory, if tn was copied or renamed we should check |
|
1744 | 1744 | # if the source is binary too but the copy record already |
|
1745 | 1745 | # forces git mode. |
|
1746 | 1746 | if util.binary(tn): |
|
1747 | 1747 | if opts.git: |
|
1748 | 1748 | dodiff = 'binary' |
|
1749 | 1749 | else: |
|
1750 | 1750 | losedatafn(f) |
|
1751 | 1751 | if not opts.git and not tn: |
|
1752 | 1752 | # regular diffs cannot represent new empty file |
|
1753 | 1753 | losedatafn(f) |
|
1754 | 1754 | elif f in removed: |
|
1755 | 1755 | if opts.git: |
|
1756 | 1756 | # have we already reported a copy above? |
|
1757 | 1757 | if ((f in copy and copy[f] in added |
|
1758 | 1758 | and copyto[copy[f]] == f) or |
|
1759 | 1759 | (f in copyto and copyto[f] in added |
|
1760 | 1760 | and copy[copyto[f]] == f)): |
|
1761 | 1761 | dodiff = False |
|
1762 | 1762 | else: |
|
1763 | 1763 | header.append('deleted file mode %s\n' % |
|
1764 | 1764 | gitmode[man1.flags(f)]) |
|
1765 | 1765 | elif not to or util.binary(to): |
|
1766 | 1766 | # regular diffs cannot represent empty file deletion |
|
1767 | 1767 | losedatafn(f) |
|
1768 | 1768 | else: |
|
1769 | 1769 | oflag = man1.flags(f) |
|
1770 | 1770 | nflag = ctx2.flags(f) |
|
1771 | 1771 | binary = util.binary(to) or util.binary(tn) |
|
1772 | 1772 | if opts.git: |
|
1773 | 1773 | _addmodehdr(header, gitmode[oflag], gitmode[nflag]) |
|
1774 | 1774 | if binary: |
|
1775 | 1775 | dodiff = 'binary' |
|
1776 | 1776 | elif binary or nflag != oflag: |
|
1777 | 1777 | losedatafn(f) |
|
1778 | 1778 | if opts.git: |
|
1779 | 1779 | header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) |
|
1780 | 1780 | |
|
1781 | 1781 | if dodiff: |
|
1782 | 1782 | if dodiff == 'binary': |
|
1783 | 1783 | text = b85diff(to, tn) |
|
1784 | 1784 | else: |
|
1785 | 1785 | text = mdiff.unidiff(to, date1, |
|
1786 | 1786 | # ctx2 date may be dynamic |
|
1787 | 1787 | tn, util.datestr(ctx2.date()), |
|
1788 | 1788 | join(a), join(b), revs, opts=opts) |
|
1789 | 1789 | if header and (text or len(header) > 1): |
|
1790 | 1790 | yield ''.join(header) |
|
1791 | 1791 | if text: |
|
1792 | 1792 | yield text |
|
1793 | 1793 | |
|
1794 | 1794 | def diffstatsum(stats): |
|
1795 | 1795 | maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False |
|
1796 | 1796 | for f, a, r, b in stats: |
|
1797 | 1797 | maxfile = max(maxfile, encoding.colwidth(f)) |
|
1798 | 1798 | maxtotal = max(maxtotal, a + r) |
|
1799 | 1799 | addtotal += a |
|
1800 | 1800 | removetotal += r |
|
1801 | 1801 | binary = binary or b |
|
1802 | 1802 | |
|
1803 | 1803 | return maxfile, maxtotal, addtotal, removetotal, binary |
|
1804 | 1804 | |
|
1805 | 1805 | def diffstatdata(lines): |
|
1806 | 1806 | diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$') |
|
1807 | 1807 | |
|
1808 | 1808 | results = [] |
|
1809 | 1809 | filename, adds, removes, isbinary = None, 0, 0, False |
|
1810 | 1810 | |
|
1811 | 1811 | def addresult(): |
|
1812 | 1812 | if filename: |
|
1813 | 1813 | results.append((filename, adds, removes, isbinary)) |
|
1814 | 1814 | |
|
1815 | 1815 | for line in lines: |
|
1816 | 1816 | if line.startswith('diff'): |
|
1817 | 1817 | addresult() |
|
1818 | 1818 | # set numbers to 0 anyway when starting new file |
|
1819 | 1819 | adds, removes, isbinary = 0, 0, False |
|
1820 | 1820 | if line.startswith('diff --git'): |
|
1821 | 1821 | filename = gitre.search(line).group(1) |
|
1822 | 1822 | elif line.startswith('diff -r'): |
|
1823 | 1823 | # format: "diff -r ... -r ... filename" |
|
1824 | 1824 | filename = diffre.search(line).group(1) |
|
1825 | 1825 | elif line.startswith('+') and not line.startswith('+++ '): |
|
1826 | 1826 | adds += 1 |
|
1827 | 1827 | elif line.startswith('-') and not line.startswith('--- '): |
|
1828 | 1828 | removes += 1 |
|
1829 | 1829 | elif (line.startswith('GIT binary patch') or |
|
1830 | 1830 | line.startswith('Binary file')): |
|
1831 | 1831 | isbinary = True |
|
1832 | 1832 | addresult() |
|
1833 | 1833 | return results |
|
1834 | 1834 | |
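
diffstatdata() over a made-up diff; note that the '---'/'+++' header lines are excluded from the add/remove counts:

    sample = ['diff -r 000000000000 -r 111111111111 foo.c',
              '--- a/foo.c',
              '+++ b/foo.c',
              '@@ -1,2 +1,2 @@',
              '-old line',
              '+new line',
              ' context']
    assert diffstatdata(sample) == [('foo.c', 1, 1, False)]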
|
1835 | 1835 | def diffstat(lines, width=80, git=False): |
|
1836 | 1836 | output = [] |
|
1837 | 1837 | stats = diffstatdata(lines) |
|
1838 | 1838 | maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats) |
|
1839 | 1839 | |
|
1840 | 1840 | countwidth = len(str(maxtotal)) |
|
1841 | 1841 | if hasbinary and countwidth < 3: |
|
1842 | 1842 | countwidth = 3 |
|
1843 | 1843 | graphwidth = width - countwidth - maxname - 6 |
|
1844 | 1844 | if graphwidth < 10: |
|
1845 | 1845 | graphwidth = 10 |
|
1846 | 1846 | |
|
1847 | 1847 | def scale(i): |
|
1848 | 1848 | if maxtotal <= graphwidth: |
|
1849 | 1849 | return i |
|
1850 | 1850 | # If diffstat runs out of room it doesn't print anything, |
|
1851 | 1851 | # which isn't very useful, so always print at least one + or - |
|
1852 | 1852 | # if there were at least some changes. |
|
1853 | 1853 | return max(i * graphwidth // maxtotal, int(bool(i))) |
|
1854 | 1854 | |
|
1855 | 1855 | for filename, adds, removes, isbinary in stats: |
|
1856 | 1856 | if isbinary: |
|
1857 | 1857 | count = 'Bin' |
|
1858 | 1858 | else: |
|
1859 | 1859 | count = adds + removes |
|
1860 | 1860 | pluses = '+' * scale(adds) |
|
1861 | 1861 | minuses = '-' * scale(removes) |
|
1862 | 1862 | output.append(' %s%s | %*s %s%s\n' % |
|
1863 | 1863 | (filename, ' ' * (maxname - encoding.colwidth(filename)), |
|
1864 | 1864 | countwidth, count, pluses, minuses)) |
|
1865 | 1865 | |
|
1866 | 1866 | if stats: |
|
1867 | 1867 | output.append(_(' %d files changed, %d insertions(+), ' |
|
1868 | 1868 | '%d deletions(-)\n') |
|
1869 | 1869 | % (len(stats), totaladds, totalremoves)) |
|
1870 | 1870 | |
|
1871 | 1871 | return ''.join(output) |
|
1872 | 1872 | |
|
1873 | 1873 | def diffstatui(*args, **kw): |
|
1874 | 1874 | '''like diffstat(), but yields 2-tuples of (output, label) for |
|
1875 | 1875 | ui.write() |
|
1876 | 1876 | ''' |
|
1877 | 1877 | |
|
1878 | 1878 | for line in diffstat(*args, **kw).splitlines(): |
|
1879 | 1879 | if line and line[-1] in '+-': |
|
1880 | 1880 | name, graph = line.rsplit(' ', 1) |
|
1881 | 1881 | yield (name + ' ', '') |
|
1882 | 1882 | m = re.search(r'\++', graph) |
|
1883 | 1883 | if m: |
|
1884 | 1884 | yield (m.group(0), 'diffstat.inserted') |
|
1885 | 1885 | m = re.search(r'-+', graph) |
|
1886 | 1886 | if m: |
|
1887 | 1887 | yield (m.group(0), 'diffstat.deleted') |
|
1888 | 1888 | else: |
|
1889 | 1889 | yield (line, '') |
|
1890 | 1890 | yield ('\n', '') |
@@ -1,170 +1,170 b'' | |||
|
1 | 1 | # osutil.py - pure Python version of osutil.c |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import os |
|
9 | 9 | import stat as statmod |
|
10 | 10 | |
|
11 | 11 | def _mode_to_kind(mode): |
|
12 | 12 | if statmod.S_ISREG(mode): |
|
13 | 13 | return statmod.S_IFREG |
|
14 | 14 | if statmod.S_ISDIR(mode): |
|
15 | 15 | return statmod.S_IFDIR |
|
16 | 16 | if statmod.S_ISLNK(mode): |
|
17 | 17 | return statmod.S_IFLNK |
|
18 | 18 | if statmod.S_ISBLK(mode): |
|
19 | 19 | return statmod.S_IFBLK |
|
20 | 20 | if statmod.S_ISCHR(mode): |
|
21 | 21 | return statmod.S_IFCHR |
|
22 | 22 | if statmod.S_ISFIFO(mode): |
|
23 | 23 | return statmod.S_IFIFO |
|
24 | 24 | if statmod.S_ISSOCK(mode): |
|
25 | 25 | return statmod.S_IFSOCK |
|
26 | 26 | return mode |
|
27 | 27 | |
|
28 | 28 | def listdir(path, stat=False, skip=None): |
|
29 | 29 | '''listdir(path, stat=False) -> list_of_tuples |
|
30 | 30 | |
|
31 | 31 | Return a sorted list containing information about the entries |
|
32 | 32 | in the directory. |
|
33 | 33 | |
|
34 | 34 | If stat is True, each element is a 3-tuple: |
|
35 | 35 | |
|
36 | 36 | (name, type, stat object) |
|
37 | 37 | |
|
38 | 38 | Otherwise, each element is a 2-tuple: |
|
39 | 39 | |
|
40 | 40 | (name, type) |
|
41 | 41 | ''' |
|
42 | 42 | result = [] |
|
43 | 43 | prefix = path |
|
44 | 44 | if not prefix.endswith(os.sep): |
|
45 | 45 | prefix += os.sep |
|
46 | 46 | names = os.listdir(path) |
|
47 | 47 | names.sort() |
|
48 | 48 | for fn in names: |
|
49 | 49 | st = os.lstat(prefix + fn) |
|
50 | 50 | if fn == skip and statmod.S_ISDIR(st.st_mode): |
|
51 | 51 | return [] |
|
52 | 52 | if stat: |
|
53 | 53 | result.append((fn, _mode_to_kind(st.st_mode), st)) |
|
54 | 54 | else: |
|
55 | 55 | result.append((fn, _mode_to_kind(st.st_mode))) |
|
56 | 56 | return result |
|
57 | 57 | |
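
Usage sketch for the pure-Python listdir(); the kind values are the S_IF* constants from the stat module, per _mode_to_kind above:

    for name, kind in listdir('.'):
        if kind == statmod.S_IFDIR:
            print name + '/'

    for name, kind, st in listdir('.', stat=True):
        print '%s: %d bytes' % (name, st.st_size)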
|
58 | 58 | if os.name != 'nt': |
|
59 | 59 | posixfile = open |
|
60 | 60 | else: |
|
61 | 61 | import ctypes, msvcrt |
|
62 | 62 | |
|
63 | 63 | _kernel32 = ctypes.windll.kernel32 |
|
64 | 64 | |
|
65 | 65 | _DWORD = ctypes.c_ulong |
|
66 | 66 | _LPCSTR = _LPSTR = ctypes.c_char_p |
|
67 | 67 | _HANDLE = ctypes.c_void_p |
|
68 | 68 | |
|
69 | 69 | _INVALID_HANDLE_VALUE = _HANDLE(-1).value |
|
70 | 70 | |
|
71 | 71 | # CreateFile |
|
72 | 72 | _FILE_SHARE_READ = 0x00000001 |
|
73 | 73 | _FILE_SHARE_WRITE = 0x00000002 |
|
74 | 74 | _FILE_SHARE_DELETE = 0x00000004 |
|
75 | 75 | |
|
76 | 76 | _CREATE_ALWAYS = 2 |
|
77 | 77 | _OPEN_EXISTING = 3 |
|
78 | 78 | _OPEN_ALWAYS = 4 |
|
79 | 79 | |
|
80 | 80 | _GENERIC_READ = 0x80000000 |
|
81 | 81 | _GENERIC_WRITE = 0x40000000 |
|
82 | 82 | |
|
83 | 83 | _FILE_ATTRIBUTE_NORMAL = 0x80 |
|
84 | 84 | |
|
85 | 85 | # _open_osfhandle |
|
86 | 86 | _O_RDONLY = 0x0000 |
|
87 | 87 | _O_RDWR = 0x0002 |
|
88 | 88 | _O_APPEND = 0x0008 |
|
89 | 89 | |
|
90 | 90 | _O_TEXT = 0x4000 |
|
91 | 91 | _O_BINARY = 0x8000 |
|
92 | 92 | |
|
93 | 93 | # types of parameters of C functions used (required by pypy) |
|
94 | 94 | |
|
95 | 95 | _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p, |
|
96 | 96 | _DWORD, _DWORD, _HANDLE] |
|
97 | 97 | _kernel32.CreateFileA.restype = _HANDLE |
|
98 | 98 | |
|
99 | 99 | def _raiseioerror(name): |
|
100 | 100 | err = ctypes.WinError() |
|
101 | 101 | raise IOError(err.errno, '%s: %s' % (name, err.strerror)) |
|
102 | 102 | |
|
103 | 103 | class posixfile(object): |
|
104 | 104 | '''a file object aiming for POSIX-like semantics |
|
105 | 105 | |
|
106 | 106 | CPython's open() returns a file that was opened *without* setting the |
|
107 | 107 | _FILE_SHARE_DELETE flag, which causes rename and unlink to abort. |
|
108 | 108 | This even happens if any hardlinked copy of the file is in open state. |
|
109 | 109 | We set _FILE_SHARE_DELETE here, so files opened with posixfile can be |
|
110 | 110 | renamed and deleted while they are held open. |
|
111 | 111 | Note that if a file opened with posixfile is unlinked, the file |
|
112 | 112 | remains but cannot be opened again or be recreated under the same name, |
|
113 | 113 | until all reading processes have closed the file.''' |
|
114 | 114 | |
|
115 | 115 | def __init__(self, name, mode='r', bufsize=-1): |
|
116 | 116 | if 'b' in mode: |
|
117 | 117 | flags = _O_BINARY |
|
118 | 118 | else: |
|
119 | 119 | flags = _O_TEXT |
|
120 | 120 | |
|
121 | 121 | m0 = mode[0] |
|
122 | | if m0 == 'r' and not '+' in mode: |

| 122 | if m0 == 'r' and '+' not in mode: |
|
123 | 123 | flags |= _O_RDONLY |
|
124 | 124 | access = _GENERIC_READ |
|
125 | 125 | else: |
|
126 | 126 | # work around http://support.microsoft.com/kb/899149 and |
|
127 | 127 | # set _O_RDWR for 'w' and 'a', even if mode has no '+' |
|
128 | 128 | flags |= _O_RDWR |
|
129 | 129 | access = _GENERIC_READ | _GENERIC_WRITE |
|
130 | 130 | |
|
131 | 131 | if m0 == 'r': |
|
132 | 132 | creation = _OPEN_EXISTING |
|
133 | 133 | elif m0 == 'w': |
|
134 | 134 | creation = _CREATE_ALWAYS |
|
135 | 135 | elif m0 == 'a': |
|
136 | 136 | creation = _OPEN_ALWAYS |
|
137 | 137 | flags |= _O_APPEND |
|
138 | 138 | else: |
|
139 | 139 | raise ValueError("invalid mode: %s" % mode) |
|
140 | 140 | |
|
141 | 141 | fh = _kernel32.CreateFileA(name, access, |
|
142 | 142 | _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE, |
|
143 | 143 | None, creation, _FILE_ATTRIBUTE_NORMAL, None) |
|
144 | 144 | if fh == _INVALID_HANDLE_VALUE: |
|
145 | 145 | _raiseioerror(name) |
|
146 | 146 | |
|
147 | 147 | fd = msvcrt.open_osfhandle(fh, flags) |
|
148 | 148 | if fd == -1: |
|
149 | 149 | _kernel32.CloseHandle(fh) |
|
150 | 150 | _raiseioerror(name) |
|
151 | 151 | |
|
152 | 152 | f = os.fdopen(fd, mode, bufsize) |
|
153 | 153 | # unfortunately, f.name is '<fdopen>' at this point -- so we store |
|
154 | 154 | # the name on this wrapper. We cannot just assign to f.name, |
|
155 | 155 | # because that attribute is read-only. |
|
156 | 156 | object.__setattr__(self, 'name', name) |
|
157 | 157 | object.__setattr__(self, '_file', f) |
|
158 | 158 | |
|
159 | 159 | def __iter__(self): |
|
160 | 160 | return self._file |
|
161 | 161 | |
|
162 | 162 | def __getattr__(self, name): |
|
163 | 163 | return getattr(self._file, name) |
|
164 | 164 | |
|
165 | 165 | def __setattr__(self, name, value): |
|
166 | 166 | '''mimics the read-only attributes of Python file objects |
|
167 | 167 | by raising 'TypeError: readonly attribute' if someone tries: |
|
168 | 168 | f = posixfile('foo.txt') |
|
169 | 169 | f.name = 'bla' ''' |
|
170 | 170 | return self._file.__setattr__(name, value) |
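
A sketch of the behavior posixfile buys on Windows: because the handle is opened with _FILE_SHARE_DELETE, the usual POSIX pattern of renaming or unlinking a file that is still open keeps working. The file names are made up:

    f = posixfile('journal.tmp', 'wb')
    f.write('some data')
    # with plain open() on Windows this rename would fail while f is open
    os.rename('journal.tmp', 'journal')
    f.close()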
@@ -1,1317 +1,1317 b'' | |||
|
1 | 1 | # revlog.py - storage back-end for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """Storage back-end for Mercurial. |
|
9 | 9 | |
|
10 | 10 | This provides efficient delta storage with O(1) retrieve and append |
|
11 | 11 | and O(changes) merge between branches. |
|
12 | 12 | """ |
|
13 | 13 | |
|
14 | 14 | # import stuff from node for others to import from revlog |
|
15 | 15 | from node import bin, hex, nullid, nullrev |
|
16 | 16 | from i18n import _ |
|
17 | 17 | import ancestor, mdiff, parsers, error, util, dagutil |
|
18 | 18 | import struct, zlib, errno |
|
19 | 19 | |
|
20 | 20 | _pack = struct.pack |
|
21 | 21 | _unpack = struct.unpack |
|
22 | 22 | _compress = zlib.compress |
|
23 | 23 | _decompress = zlib.decompress |
|
24 | 24 | _sha = util.sha1 |
|
25 | 25 | |
|
26 | 26 | # revlog header flags |
|
27 | 27 | REVLOGV0 = 0 |
|
28 | 28 | REVLOGNG = 1 |
|
29 | 29 | REVLOGNGINLINEDATA = (1 << 16) |
|
30 | 30 | REVLOGGENERALDELTA = (1 << 17) |
|
31 | 31 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA |
|
32 | 32 | REVLOG_DEFAULT_FORMAT = REVLOGNG |
|
33 | 33 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS |
|
34 | 34 | REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA |
|
35 | 35 | |
|
36 | 36 | # revlog index flags |
|
37 | 37 | REVIDX_KNOWN_FLAGS = 0 |
|
38 | 38 | |
|
39 | 39 | # max size of revlog with inline data |
|
40 | 40 | _maxinline = 131072 |
|
41 | 41 | _chunksize = 1048576 |
|
42 | 42 | |
|
43 | 43 | RevlogError = error.RevlogError |
|
44 | 44 | LookupError = error.LookupError |
|
45 | 45 | |
|
46 | 46 | def getoffset(q): |
|
47 | 47 | return int(q >> 16) |
|
48 | 48 | |
|
49 | 49 | def gettype(q): |
|
50 | 50 | return int(q & 0xFFFF) |
|
51 | 51 | |
|
52 | 52 | def offset_type(offset, type): |
|
53 | 53 | return long(long(offset) << 16 | type) |
|
54 | 54 | |
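
The first field of an index entry packs a 48-bit data offset together with 16 bits of flags; a quick check of the three helpers above:

    v = offset_type(4096, 1)
    assert getoffset(v) == 4096
    assert gettype(v) == 1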
|
55 | 55 | nullhash = _sha(nullid) |
|
56 | 56 | |
|
57 | 57 | def hash(text, p1, p2): |
|
58 | 58 | """generate a hash from the given text and its parent hashes |
|
59 | 59 | |
|
60 | 60 | This hash combines both the current file contents and its history |
|
61 | 61 | in a manner that makes it easy to distinguish nodes with the same |
|
62 | 62 | content in the revision graph. |
|
63 | 63 | """ |
|
64 | 64 | # As of now, if one of the parent nodes is null, p2 is null |
|
65 | 65 | if p2 == nullid: |
|
66 | 66 | # deep copy of a hash is faster than creating one |
|
67 | 67 | s = nullhash.copy() |
|
68 | 68 | s.update(p1) |
|
69 | 69 | else: |
|
70 | 70 | # none of the parent nodes are nullid |
|
71 | 71 | l = [p1, p2] |
|
72 | 72 | l.sort() |
|
73 | 73 | s = _sha(l[0]) |
|
74 | 74 | s.update(l[1]) |
|
75 | 75 | s.update(text) |
|
76 | 76 | return s.digest() |
|
77 | 77 | |
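
A small illustration: for a root revision both parents are nullid, so the node id is sha1(nullid + nullid + text), and the cached nullhash copy above merely avoids rehashing the first twenty bytes each time. The content string is made up:

    node = hash('file content', nullid, nullid)
    assert len(node) == 20      # raw digest; hex(node) gives the familiar form
    assert node == hash('file content', nullid, nullid)   # deterministic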
|
78 | 78 | def compress(text): |
|
79 | 79 | """ generate a possibly-compressed representation of text """ |
|
80 | 80 | if not text: |
|
81 | 81 | return ("", text) |
|
82 | 82 | l = len(text) |
|
83 | 83 | bin = None |
|
84 | 84 | if l < 44: |
|
85 | 85 | pass |
|
86 | 86 | elif l > 1000000: |
|
87 | 87 | # zlib makes an internal copy, thus doubling memory usage for |
|
88 | 88 | # large files, so let's do this in pieces |
|
89 | 89 | z = zlib.compressobj() |
|
90 | 90 | p = [] |
|
91 | 91 | pos = 0 |
|
92 | 92 | while pos < l: |
|
93 | 93 | pos2 = pos + 2**20 |
|
94 | 94 | p.append(z.compress(text[pos:pos2])) |
|
95 | 95 | pos = pos2 |
|
96 | 96 | p.append(z.flush()) |
|
97 | 97 | if sum(map(len, p)) < l: |
|
98 | 98 | bin = "".join(p) |
|
99 | 99 | else: |
|
100 | 100 | bin = _compress(text) |
|
101 | 101 | if bin is None or len(bin) > l: |
|
102 | 102 | if text[0] == '\0': |
|
103 | 103 | return ("", text) |
|
104 | 104 | return ('u', text) |
|
105 | 105 | return ("", bin) |
|
106 | 106 | |
|
107 | 107 | def decompress(bin): |
|
108 | 108 | """ decompress the given input """ |
|
109 | 109 | if not bin: |
|
110 | 110 | return bin |
|
111 | 111 | t = bin[0] |
|
112 | 112 | if t == '\0': |
|
113 | 113 | return bin |
|
114 | 114 | if t == 'x': |
|
115 | 115 | return _decompress(bin) |
|
116 | 116 | if t == 'u': |
|
117 | 117 | return bin[1:] |
|
118 | 118 | raise RevlogError(_("unknown compression type %r") % t) |
|
119 | 119 | |
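The first byte of a stored chunk is the dispatch key for decompress(): 'x' is a zlib stream (zlib output with default settings begins with 0x78, i.e. 'x'), 'u' prefixes text stored verbatim, and a leading NUL means the text was stored as-is because no marker was needed. A simplified standalone round trip (the real compress() returns a (header, data) pair and special-cases very large inputs):

    import zlib

    def store(text):
        bin = zlib.compress(text)
        if len(bin) < len(text):
            return bin                 # zlib stream, starts with 'x'
        if text[:1] == b"\x00":
            return text                # NUL-leading text passes through
        return b"u" + text             # explicit "uncompressed" marker

    def load(chunk):
        t = chunk[:1]
        if t in (b"", b"\x00"):
            return chunk
        if t == b"x":
            return zlib.decompress(chunk)
        if t == b"u":
            return chunk[1:]
        raise ValueError("unknown compression type %r" % t)

    text = b"some revision text " * 10
    assert load(store(text)) == text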
|
120 | 120 | indexformatv0 = ">4l20s20s20s" |
|
121 | 121 | v0shaoffset = 56 |
|
122 | 122 | |
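The v0 entry is four 32-bit integers (offset, compressed length, base rev, link rev) followed by three 20-byte nodes: the two parents and the entry's own nodeid. That makes the entry 76 bytes, with the entry's own node starting at offset 56, which is what v0shaoffset records:

    import struct

    assert struct.calcsize(">4l20s20s20s") == 76
    assert struct.calcsize(">4l20s20s") == 56   # start of the third node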
|
123 | 123 | class revlogoldio(object): |
|
124 | 124 | def __init__(self): |
|
125 | 125 | self.size = struct.calcsize(indexformatv0) |
|
126 | 126 | |
|
127 | 127 | def parseindex(self, data, inline): |
|
128 | 128 | s = self.size |
|
129 | 129 | index = [] |
|
130 | 130 | nodemap = {nullid: nullrev} |
|
131 | 131 | n = off = 0 |
|
132 | 132 | l = len(data) |
|
133 | 133 | while off + s <= l: |
|
134 | 134 | cur = data[off:off + s] |
|
135 | 135 | off += s |
|
136 | 136 | e = _unpack(indexformatv0, cur) |
|
137 | 137 | # transform to revlogv1 format |
|
138 | 138 | e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], |
|
139 | 139 | nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) |
|
140 | 140 | index.append(e2) |
|
141 | 141 | nodemap[e[6]] = n |
|
142 | 142 | n += 1 |
|
143 | 143 | |
|
144 | 144 | # add the magic null revision at -1 |
|
145 | 145 | index.append((0, 0, 0, -1, -1, -1, -1, nullid)) |
|
146 | 146 | |
|
147 | 147 | return index, nodemap, None |
|
148 | 148 | |
|
149 | 149 | def packentry(self, entry, node, version, rev): |
|
150 | 150 | if gettype(entry[0]): |
|
151 | 151 | raise RevlogError(_("index entry flags need RevlogNG")) |
|
152 | 152 | e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], |
|
153 | 153 | node(entry[5]), node(entry[6]), entry[7]) |
|
154 | 154 | return _pack(indexformatv0, *e2) |
|
155 | 155 | |
|
156 | 156 | # index ng: |
|
157 | 157 | # 6 bytes: offset |
|
158 | 158 | # 2 bytes: flags |
|
159 | 159 | # 4 bytes: compressed length |
|
160 | 160 | # 4 bytes: uncompressed length |
|
161 | 161 | # 4 bytes: base rev |
|
162 | 162 | # 4 bytes: link rev |
|
163 | 163 | # 4 bytes: parent 1 rev |
|
164 | 164 | # 4 bytes: parent 2 rev |
|
165 | 165 | # 32 bytes: nodeid |
|
166 | 166 | indexformatng = ">Qiiiiii20s12x" |
|
167 | 167 | ngshaoffset = 32 |
|
168 | 168 | versionformat = ">I" |
|
169 | 169 | |
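A RevlogNG entry is exactly 64 bytes: one big-endian quadword packing offset and flags, six 32-bit integers, a 20-byte nodeid, and 12 bytes of padding (the "12x"). A quick struct sanity check and toy round trip:

    import struct

    indexformatng = ">Qiiiiii20s12x"
    assert struct.calcsize(indexformatng) == 64

    # offset/flags, comp. len, uncomp. len, base, link, p1, p2, node
    entry = (0, 11, 11, 0, 0, -1, -1, b"\x00" * 20)
    packed = struct.pack(indexformatng, *entry)
    assert struct.unpack(indexformatng, packed) == entry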
|
170 | 170 | class revlogio(object): |
|
171 | 171 | def __init__(self): |
|
172 | 172 | self.size = struct.calcsize(indexformatng) |
|
173 | 173 | |
|
174 | 174 | def parseindex(self, data, inline): |
|
175 | 175 | # call the C implementation to parse the index data |
|
176 | 176 | index, cache = parsers.parse_index2(data, inline) |
|
177 | 177 | return index, getattr(index, 'nodemap', None), cache |
|
178 | 178 | |
|
179 | 179 | def packentry(self, entry, node, version, rev): |
|
180 | 180 | p = _pack(indexformatng, *entry) |
|
181 | 181 | if rev == 0: |
|
182 | 182 | p = _pack(versionformat, version) + p[4:] |
|
183 | 183 | return p |
|
184 | 184 | |
|
185 | 185 | class revlog(object): |
|
186 | 186 | """ |
|
187 | 187 | the underlying revision storage object |
|
188 | 188 | |
|
189 | 189 | A revlog consists of two parts, an index and the revision data. |
|
190 | 190 | |
|
191 | 191 | The index is a file with a fixed record size containing |
|
192 | 192 | information on each revision, including its nodeid (hash), the |
|
193 | 193 | nodeids of its parents, the position and offset of its data within |
|
194 | 194 | the data file, and the revision it's based on. Finally, each entry |
|
195 | 195 | contains a linkrev entry that can serve as a pointer to external |
|
196 | 196 | data. |
|
197 | 197 | |
|
198 | 198 | The revision data itself is a linear collection of data chunks. |
|
199 | 199 | Each chunk represents a revision and is usually represented as a |
|
200 | 200 | delta against the previous chunk. To bound lookup time, runs of |
|
201 | 201 | deltas are limited to about 2 times the length of the original |
|
202 | 202 | version data. This makes retrieval of a version proportional to |
|
203 | 203 | its size, or O(1) relative to the number of revisions. |
|
204 | 204 | |
|
205 | 205 | Both pieces of the revlog are written to in an append-only |
|
206 | 206 | fashion, which means we never need to rewrite a file to insert or |
|
207 | 207 | remove data, and can use some simple techniques to avoid the need |
|
208 | 208 | for locking while reading. |
|
209 | 209 | """ |
|
210 | 210 | def __init__(self, opener, indexfile): |
|
211 | 211 | """ |
|
212 | 212 | create a revlog object |
|
213 | 213 | |
|
214 | 214 | opener is a function that abstracts the file opening operation |
|
215 | 215 | and can be used to implement COW semantics or the like. |
|
216 | 216 | """ |
|
217 | 217 | self.indexfile = indexfile |
|
218 | 218 | self.datafile = indexfile[:-2] + ".d" |
|
219 | 219 | self.opener = opener |
|
220 | 220 | self._cache = None |
|
221 | 221 | self._basecache = (0, 0) |
|
222 | 222 | self._chunkcache = (0, '') |
|
223 | 223 | self.index = [] |
|
224 | 224 | self._pcache = {} |
|
225 | 225 | self._nodecache = {nullid: nullrev} |
|
226 | 226 | self._nodepos = None |
|
227 | 227 | |
|
228 | 228 | v = REVLOG_DEFAULT_VERSION |
|
229 | 229 | opts = getattr(opener, 'options', None) |
|
230 | 230 | if opts is not None: |
|
231 | 231 | if 'revlogv1' in opts: |
|
232 | 232 | if 'generaldelta' in opts: |
|
233 | 233 | v |= REVLOGGENERALDELTA |
|
234 | 234 | else: |
|
235 | 235 | v = 0 |
|
236 | 236 | |
|
237 | 237 | i = '' |
|
238 | 238 | self._initempty = True |
|
239 | 239 | try: |
|
240 | 240 | f = self.opener(self.indexfile) |
|
241 | 241 | i = f.read() |
|
242 | 242 | f.close() |
|
243 | 243 | if len(i) > 0: |
|
244 | 244 | v = struct.unpack(versionformat, i[:4])[0] |
|
245 | 245 | self._initempty = False |
|
246 | 246 | except IOError, inst: |
|
247 | 247 | if inst.errno != errno.ENOENT: |
|
248 | 248 | raise |
|
249 | 249 | |
|
250 | 250 | self.version = v |
|
251 | 251 | self._inline = v & REVLOGNGINLINEDATA |
|
252 | 252 | self._generaldelta = v & REVLOGGENERALDELTA |
|
253 | 253 | flags = v & ~0xFFFF |
|
254 | 254 | fmt = v & 0xFFFF |
|
255 | 255 | if fmt == REVLOGV0 and flags: |
|
256 | 256 | raise RevlogError(_("index %s unknown flags %#04x for format v0") |
|
257 | 257 | % (self.indexfile, flags >> 16)) |
|
258 | 258 | elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS: |
|
259 | 259 | raise RevlogError(_("index %s unknown flags %#04x for revlogng") |
|
260 | 260 | % (self.indexfile, flags >> 16)) |
|
261 | 261 | elif fmt > REVLOGNG: |
|
262 | 262 | raise RevlogError(_("index %s unknown format %d") |
|
263 | 263 | % (self.indexfile, fmt)) |
|
264 | 264 | |
|
265 | 265 | self._io = revlogio() |
|
266 | 266 | if self.version == REVLOGV0: |
|
267 | 267 | self._io = revlogoldio() |
|
268 | 268 | try: |
|
269 | 269 | d = self._io.parseindex(i, self._inline) |
|
270 | 270 | except (ValueError, IndexError): |
|
271 | 271 | raise RevlogError(_("index %s is corrupted") % (self.indexfile)) |
|
272 | 272 | self.index, nodemap, self._chunkcache = d |
|
273 | 273 | if nodemap is not None: |
|
274 | 274 | self.nodemap = self._nodecache = nodemap |
|
275 | 275 | if not self._chunkcache: |
|
276 | 276 | self._chunkclear() |
|
277 | 277 | |
|
278 | 278 | def tip(self): |
|
279 | 279 | return self.node(len(self.index) - 2) |
|
280 | 280 | def __len__(self): |
|
281 | 281 | return len(self.index) - 1 |
|
282 | 282 | def __iter__(self): |
|
283 | 283 | for i in xrange(len(self)): |
|
284 | 284 | yield i |
|
285 | 285 | |
|
286 | 286 | @util.propertycache |
|
287 | 287 | def nodemap(self): |
|
288 | 288 | self.rev(self.node(0)) |
|
289 | 289 | return self._nodecache |
|
290 | 290 | |
|
291 | 291 | def hasnode(self, node): |
|
292 | 292 | try: |
|
293 | 293 | self.rev(node) |
|
294 | 294 | return True |
|
295 | 295 | except KeyError: |
|
296 | 296 | return False |
|
297 | 297 | |
|
298 | 298 | def clearcaches(self): |
|
299 | 299 | try: |
|
300 | 300 | self._nodecache.clearcaches() |
|
301 | 301 | except AttributeError: |
|
302 | 302 | self._nodecache = {nullid: nullrev} |
|
303 | 303 | self._nodepos = None |
|
304 | 304 | |
|
305 | 305 | def rev(self, node): |
|
306 | 306 | try: |
|
307 | 307 | return self._nodecache[node] |
|
308 | 308 | except RevlogError: |
|
309 | 309 | # parsers.c radix tree lookup failed |
|
310 | 310 | raise LookupError(node, self.indexfile, _('no node')) |
|
311 | 311 | except KeyError: |
|
312 | 312 | # pure python cache lookup failed |
|
313 | 313 | n = self._nodecache |
|
314 | 314 | i = self.index |
|
315 | 315 | p = self._nodepos |
|
316 | 316 | if p is None: |
|
317 | 317 | p = len(i) - 2 |
|
318 | 318 | for r in xrange(p, -1, -1): |
|
319 | 319 | v = i[r][7] |
|
320 | 320 | n[v] = r |
|
321 | 321 | if v == node: |
|
322 | 322 | self._nodepos = r - 1 |
|
323 | 323 | return r |
|
324 | 324 | raise LookupError(node, self.indexfile, _('no node')) |
|
325 | 325 | |
|
326 | 326 | def node(self, rev): |
|
327 | 327 | return self.index[rev][7] |
|
328 | 328 | def linkrev(self, rev): |
|
329 | 329 | return self.index[rev][4] |
|
330 | 330 | def parents(self, node): |
|
331 | 331 | i = self.index |
|
332 | 332 | d = i[self.rev(node)] |
|
333 | 333 | return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline |
|
334 | 334 | def parentrevs(self, rev): |
|
335 | 335 | return self.index[rev][5:7] |
|
336 | 336 | def start(self, rev): |
|
337 | 337 | return int(self.index[rev][0] >> 16) |
|
338 | 338 | def end(self, rev): |
|
339 | 339 | return self.start(rev) + self.length(rev) |
|
340 | 340 | def length(self, rev): |
|
341 | 341 | return self.index[rev][1] |
|
342 | 342 | def chainbase(self, rev): |
|
343 | 343 | index = self.index |
|
344 | 344 | base = index[rev][3] |
|
345 | 345 | while base != rev: |
|
346 | 346 | rev = base |
|
347 | 347 | base = index[rev][3] |
|
348 | 348 | return base |
|
349 | 349 | def flags(self, rev): |
|
350 | 350 | return self.index[rev][0] & 0xFFFF |
|
351 | 351 | def rawsize(self, rev): |
|
352 | 352 | """return the length of the uncompressed text for a given revision""" |
|
353 | 353 | l = self.index[rev][2] |
|
354 | 354 | if l >= 0: |
|
355 | 355 | return l |
|
356 | 356 | |
|
357 | 357 | t = self.revision(self.node(rev)) |
|
358 | 358 | return len(t) |
|
359 | 359 | size = rawsize |
|
360 | 360 | |
|
361 | 361 | def reachable(self, node, stop=None): |
|
362 | 362 | """return the set of all nodes ancestral to a given node, including |
|
363 | 363 | the node itself, stopping when stop is matched""" |
|
364 | 364 | reachable = set((node,)) |
|
365 | 365 | visit = [node] |
|
366 | 366 | if stop: |
|
367 | 367 | stopn = self.rev(stop) |
|
368 | 368 | else: |
|
369 | 369 | stopn = 0 |
|
370 | 370 | while visit: |
|
371 | 371 | n = visit.pop(0) |
|
372 | 372 | if n == stop: |
|
373 | 373 | continue |
|
374 | 374 | if n == nullid: |
|
375 | 375 | continue |
|
376 | 376 | for p in self.parents(n): |
|
377 | 377 | if self.rev(p) < stopn: |
|
378 | 378 | continue |
|
379 | 379 | if p not in reachable: |
|
380 | 380 | reachable.add(p) |
|
381 | 381 | visit.append(p) |
|
382 | 382 | return reachable |
|
383 | 383 | |
|
384 | 384 | def ancestors(self, *revs): |
|
385 | 385 | """Generate the ancestors of 'revs' in reverse topological order. |
|
386 | 386 | |
|
387 | 387 | Yield a sequence of revision numbers starting with the parents |
|
388 | 388 | of each revision in revs, i.e., each revision is *not* considered |
|
389 | 389 | an ancestor of itself. Results are in breadth-first order: |
|
390 | 390 | parents of each rev in revs, then parents of those, etc. Result |
|
391 | 391 | does not include the null revision.""" |
|
392 | 392 | visit = list(revs) |
|
393 | 393 | seen = set([nullrev]) |
|
394 | 394 | while visit: |
|
395 | 395 | for parent in self.parentrevs(visit.pop(0)): |
|
396 | 396 | if parent not in seen: |
|
397 | 397 | visit.append(parent) |
|
398 | 398 | seen.add(parent) |
|
399 | 399 | yield parent |
|
400 | 400 | |
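The generator above is a plain breadth-first walk over parent pointers, seeded with the null revision so it is never yielded. The same walk over a toy parent table:

    def toyancestors(parentrevs, *revs):
        # revs themselves are not yielded; -1 stands in for nullrev
        visit = list(revs)
        seen = set([-1])
        while visit:
            for parent in parentrevs[visit.pop(0)]:
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    parents = [(-1, -1), (0, -1), (1, -1), (0, -1)]  # 0 <- 1 <- 2, 0 <- 3
    assert sorted(toyancestors(parents, 2)) == [0, 1]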
|
401 | 401 | def descendants(self, *revs): |
|
402 | 402 | """Generate the descendants of 'revs' in revision order. |
|
403 | 403 | |
|
404 | 404 | Yield a sequence of revision numbers starting with a child of |
|
405 | 405 | some rev in revs, i.e., each revision is *not* considered a |
|
406 | 406 | descendant of itself. Results are ordered by revision number (a |
|
407 | 407 | topological sort).""" |
|
408 | 408 | first = min(revs) |
|
409 | 409 | if first == nullrev: |
|
410 | 410 | for i in self: |
|
411 | 411 | yield i |
|
412 | 412 | return |
|
413 | 413 | |
|
414 | 414 | seen = set(revs) |
|
415 | 415 | for i in xrange(first + 1, len(self)): |
|
416 | 416 | for x in self.parentrevs(i): |
|
417 | 417 | if x != nullrev and x in seen: |
|
418 | 418 | seen.add(i) |
|
419 | 419 | yield i |
|
420 | 420 | break |
|
421 | 421 | |
|
422 | 422 | def findcommonmissing(self, common=None, heads=None): |
|
423 | 423 | """Return a tuple of the ancestors of common and the ancestors of heads |
|
424 | 424 | that are not ancestors of common. In revset terminology, we return the |
|
425 | 425 | tuple: |
|
426 | 426 | |
|
427 | 427 | ::common, (::heads) - (::common) |
|
428 | 428 | |
|
429 | 429 | The list is sorted by revision number, meaning it is |
|
430 | 430 | topologically sorted. |
|
431 | 431 | |
|
432 | 432 | 'heads' and 'common' are both lists of node IDs. If heads is |
|
433 | 433 | not supplied, uses all of the revlog's heads. If common is not |
|
434 | 434 | supplied, uses nullid.""" |
|
435 | 435 | if common is None: |
|
436 | 436 | common = [nullid] |
|
437 | 437 | if heads is None: |
|
438 | 438 | heads = self.heads() |
|
439 | 439 | |
|
440 | 440 | common = [self.rev(n) for n in common] |
|
441 | 441 | heads = [self.rev(n) for n in heads] |
|
442 | 442 | |
|
443 | 443 | # we want the ancestors, but inclusive |
|
444 | 444 | has = set(self.ancestors(*common)) |
|
445 | 445 | has.add(nullrev) |
|
446 | 446 | has.update(common) |
|
447 | 447 | |
|
448 | 448 | # take all ancestors from heads that aren't in has |
|
449 | 449 | missing = set() |
|
450 | 450 | visit = [r for r in heads if r not in has] |
|
451 | 451 | while visit: |
|
452 | 452 | r = visit.pop(0) |
|
453 | 453 | if r in missing: |
|
454 | 454 | continue |
|
455 | 455 | else: |
|
456 | 456 | missing.add(r) |
|
457 | 457 | for p in self.parentrevs(r): |
|
458 | 458 | if p not in has: |
|
459 | 459 | visit.append(p) |
|
460 | 460 | missing = list(missing) |
|
461 | 461 | missing.sort() |
|
462 | 462 | return has, [self.node(r) for r in missing] |
|
463 | 463 | |
|
464 | 464 | def findmissing(self, common=None, heads=None): |
|
465 | 465 | """Return the ancestors of heads that are not ancestors of common. |
|
466 | 466 | |
|
467 | 467 | More specifically, return a list of nodes N such that every N |
|
468 | 468 | satisfies the following constraints: |
|
469 | 469 | |
|
470 | 470 | 1. N is an ancestor of some node in 'heads' |
|
471 | 471 | 2. N is not an ancestor of any node in 'common' |
|
472 | 472 | |
|
473 | 473 | The list is sorted by revision number, meaning it is |
|
474 | 474 | topologically sorted. |
|
475 | 475 | |
|
476 | 476 | 'heads' and 'common' are both lists of node IDs. If heads is |
|
477 | 477 | not supplied, uses all of the revlog's heads. If common is not |
|
478 | 478 | supplied, uses nullid.""" |
|
479 | 479 | _common, missing = self.findcommonmissing(common, heads) |
|
480 | 480 | return missing |
|
481 | 481 | |
|
482 | 482 | def nodesbetween(self, roots=None, heads=None): |
|
483 | 483 | """Return a topological path from 'roots' to 'heads'. |
|
484 | 484 | |
|
485 | 485 | Return a tuple (nodes, outroots, outheads) where 'nodes' is a |
|
486 | 486 | topologically sorted list of all nodes N that satisfy both of |
|
487 | 487 | these constraints: |
|
488 | 488 | |
|
489 | 489 | 1. N is a descendant of some node in 'roots' |
|
490 | 490 | 2. N is an ancestor of some node in 'heads' |
|
491 | 491 | |
|
492 | 492 | Every node is considered to be both a descendant and an ancestor |
|
493 | 493 | of itself, so every reachable node in 'roots' and 'heads' will be |
|
494 | 494 | included in 'nodes'. |
|
495 | 495 | |
|
496 | 496 | 'outroots' is the list of reachable nodes in 'roots', i.e., the |
|
497 | 497 | subset of 'roots' that is returned in 'nodes'. Likewise, |
|
498 | 498 | 'outheads' is the subset of 'heads' that is also in 'nodes'. |
|
499 | 499 | |
|
500 | 500 | 'roots' and 'heads' are both lists of node IDs. If 'roots' is |
|
501 | 501 | unspecified, uses nullid as the only root. If 'heads' is |
|
502 | 502 | unspecified, uses the list of all of the revlog's heads."""
|
503 | 503 | nonodes = ([], [], []) |
|
504 | 504 | if roots is not None: |
|
505 | 505 | roots = list(roots) |
|
506 | 506 | if not roots: |
|
507 | 507 | return nonodes |
|
508 | 508 | lowestrev = min([self.rev(n) for n in roots]) |
|
509 | 509 | else: |
|
510 | 510 | roots = [nullid] # Everybody's a descendant of nullid |
|
511 | 511 | lowestrev = nullrev |
|
512 | 512 | if (lowestrev == nullrev) and (heads is None): |
|
513 | 513 | # We want _all_ the nodes! |
|
514 | 514 | return ([self.node(r) for r in self], [nullid], list(self.heads())) |
|
515 | 515 | if heads is None: |
|
516 | 516 | # All nodes are ancestors, so the latest ancestor is the last |
|
517 | 517 | # node. |
|
518 | 518 | highestrev = len(self) - 1 |
|
519 | 519 | # Set ancestors to None to signal that every node is an ancestor. |
|
520 | 520 | ancestors = None |
|
521 | 521 | # Set heads to an empty dictionary for later discovery of heads |
|
522 | 522 | heads = {} |
|
523 | 523 | else: |
|
524 | 524 | heads = list(heads) |
|
525 | 525 | if not heads: |
|
526 | 526 | return nonodes |
|
527 | 527 | ancestors = set() |
|
528 | 528 | # Turn heads into a dictionary so we can remove 'fake' heads. |
|
529 | 529 | # Also, later we will be using it to filter out the heads we can't |
|
530 | 530 | # find from roots. |
|
531 | 531 | heads = dict.fromkeys(heads, False) |
|
532 | 532 | # Start at the top and keep marking parents until we're done. |
|
533 | 533 | nodestotag = set(heads) |
|
534 | 534 | # Remember where the top was so we can use it as a limit later. |
|
535 | 535 | highestrev = max([self.rev(n) for n in nodestotag]) |
|
536 | 536 | while nodestotag: |
|
537 | 537 | # grab a node to tag |
|
538 | 538 | n = nodestotag.pop() |
|
539 | 539 | # Never tag nullid |
|
540 | 540 | if n == nullid: |
|
541 | 541 | continue |
|
542 | 542 | # A node's revision number represents its place in a |
|
543 | 543 | # topologically sorted list of nodes. |
|
544 | 544 | r = self.rev(n) |
|
545 | 545 | if r >= lowestrev: |
|
546 | 546 | if n not in ancestors: |
|
547 | 547 | # If we are possibly a descendant of one of the roots |
|
548 | 548 | # and we haven't already been marked as an ancestor |
|
549 | 549 | ancestors.add(n) # Mark as ancestor |
|
550 | 550 | # Add non-nullid parents to list of nodes to tag. |
|
551 | 551 | nodestotag.update([p for p in self.parents(n) if |
|
552 | 552 | p != nullid]) |
|
553 | 553 | elif n in heads: # We've seen it before, is it a fake head? |
|
554 | 554 | # So it is; real heads should not be the ancestors of
|
555 | 555 | # any other heads. |
|
556 | 556 | heads.pop(n) |
|
557 | 557 | if not ancestors: |
|
558 | 558 | return nonodes |
|
559 | 559 | # Now that we have our set of ancestors, we want to remove any |
|
560 | 560 | # roots that are not ancestors. |
|
561 | 561 | |
|
562 | 562 | # If one of the roots was nullid, everything is included anyway. |
|
563 | 563 | if lowestrev > nullrev: |
|
564 | 564 | # But, since we weren't, let's recompute the lowest rev to not |
|
565 | 565 | # include roots that aren't ancestors. |
|
566 | 566 | |
|
567 | 567 | # Filter out roots that aren't ancestors of heads |
|
568 | 568 | roots = [n for n in roots if n in ancestors] |
|
569 | 569 | # Recompute the lowest revision |
|
570 | 570 | if roots: |
|
571 | 571 | lowestrev = min([self.rev(n) for n in roots]) |
|
572 | 572 | else: |
|
573 | 573 | # No more roots? Return empty list |
|
574 | 574 | return nonodes |
|
575 | 575 | else: |
|
576 | 576 | # We are descending from nullid, and don't need to care about |
|
577 | 577 | # any other roots. |
|
578 | 578 | lowestrev = nullrev |
|
579 | 579 | roots = [nullid] |
|
580 | 580 | # Transform our roots list into a set. |
|
581 | 581 | descendants = set(roots) |
|
582 | 582 | # Also, keep the original roots so we can filter out roots that aren't |
|
583 | 583 | # 'real' roots (i.e. are descended from other roots). |
|
584 | 584 | roots = descendants.copy() |
|
585 | 585 | # Our topologically sorted list of output nodes. |
|
586 | 586 | orderedout = [] |
|
587 | 587 | # Don't start at nullid since we don't want nullid in our output list, |
|
588 | 588 | # and if nullid shows up in descendants, empty parents will look like
|
589 | 589 | # they're descendants. |
|
590 | 590 | for r in xrange(max(lowestrev, 0), highestrev + 1): |
|
591 | 591 | n = self.node(r) |
|
592 | 592 | isdescendant = False |
|
593 | 593 | if lowestrev == nullrev: # Everybody is a descendant of nullid |
|
594 | 594 | isdescendant = True |
|
595 | 595 | elif n in descendants: |
|
596 | 596 | # n is already a descendant |
|
597 | 597 | isdescendant = True |
|
598 | 598 | # This check only needs to be done here because all the roots |
|
599 | 599 | # will start being marked as descendants before the loop.
|
600 | 600 | if n in roots: |
|
601 | 601 | # If n was a root, check if it's a 'real' root. |
|
602 | 602 | p = tuple(self.parents(n)) |
|
603 | 603 | # If any of its parents are descendants, it's not a root. |
|
604 | 604 | if (p[0] in descendants) or (p[1] in descendants): |
|
605 | 605 | roots.remove(n) |
|
606 | 606 | else: |
|
607 | 607 | p = tuple(self.parents(n)) |
|
608 | 608 | # A node is a descendant if either of its parents are |
|
609 | 609 | # descendants. (We seeded the descendants list with the roots
|
610 | 610 | # up there, remember?) |
|
611 | 611 | if (p[0] in descendants) or (p[1] in descendants): |
|
612 | 612 | descendants.add(n) |
|
613 | 613 | isdescendant = True |
|
614 | 614 | if isdescendant and ((ancestors is None) or (n in ancestors)): |
|
615 | 615 | # Only include nodes that are both descendants and ancestors. |
|
616 | 616 | orderedout.append(n) |
|
617 | 617 | if (ancestors is not None) and (n in heads): |
|
618 | 618 | # We're trying to figure out which heads are reachable |
|
619 | 619 | # from roots. |
|
620 | 620 | # Mark this head as having been reached |
|
621 | 621 | heads[n] = True |
|
622 | 622 | elif ancestors is None: |
|
623 | 623 | # Otherwise, we're trying to discover the heads. |
|
624 | 624 | # Assume this is a head because if it isn't, the next step |
|
625 | 625 | # will eventually remove it. |
|
626 | 626 | heads[n] = True |
|
627 | 627 | # But, obviously its parents aren't. |
|
628 | 628 | for p in self.parents(n): |
|
629 | 629 | heads.pop(p, None) |
|
630 | 630 | heads = [n for n, flag in heads.iteritems() if flag] |
|
631 | 631 | roots = list(roots) |
|
632 | 632 | assert orderedout |
|
633 | 633 | assert roots |
|
634 | 634 | assert heads |
|
635 | 635 | return (orderedout, roots, heads) |
|
636 | 636 | |
|
637 | 637 | def headrevs(self): |
|
638 | 638 | count = len(self) |
|
639 | 639 | if not count: |
|
640 | 640 | return [nullrev] |
|
641 | 641 | ishead = [1] * (count + 1) |
|
642 | 642 | index = self.index |
|
643 | 643 | for r in xrange(count): |
|
644 | 644 | e = index[r] |
|
645 | 645 | ishead[e[5]] = ishead[e[6]] = 0 |
|
646 | 646 | return [r for r in xrange(count) if ishead[r]] |
|
647 | 647 | |
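headrevs() above needs only one linear pass: every revision named as somebody's parent cannot be a head, and whatever is never named survives. The same idea on a toy parent table:

    # 0 <- 1 <- 2 and 0 <- 3: revisions 2 and 3 have no children
    parentrevs = [(-1, -1), (0, -1), (1, -1), (0, -1)]

    ishead = [1] * len(parentrevs)
    for p1, p2 in parentrevs:
        if p1 >= 0:
            ishead[p1] = 0
        if p2 >= 0:
            ishead[p2] = 0

    assert [r for r, h in enumerate(ishead) if h] == [2, 3]

(The real code sizes the array at count + 1 so that nullrev, -1, can be marked without a bounds check.)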
|
648 | 648 | def heads(self, start=None, stop=None): |
|
649 | 649 | """return the list of all nodes that have no children |
|
650 | 650 | |
|
651 | 651 | if start is specified, only heads that are descendants of |
|
652 | 652 | start will be returned |
|
653 | 653 | if stop is specified, it will consider all the revs from stop |
|
654 | 654 | as if they had no children |
|
655 | 655 | """ |
|
656 | 656 | if start is None and stop is None: |
|
657 | 657 | if not len(self): |
|
658 | 658 | return [nullid] |
|
659 | 659 | return [self.node(r) for r in self.headrevs()] |
|
660 | 660 | |
|
661 | 661 | if start is None: |
|
662 | 662 | start = nullid |
|
663 | 663 | if stop is None: |
|
664 | 664 | stop = [] |
|
665 | 665 | stoprevs = set([self.rev(n) for n in stop]) |
|
666 | 666 | startrev = self.rev(start) |
|
667 | 667 | reachable = set((startrev,)) |
|
668 | 668 | heads = set((startrev,)) |
|
669 | 669 | |
|
670 | 670 | parentrevs = self.parentrevs |
|
671 | 671 | for r in xrange(startrev + 1, len(self)): |
|
672 | 672 | for p in parentrevs(r): |
|
673 | 673 | if p in reachable: |
|
674 | 674 | if r not in stoprevs: |
|
675 | 675 | reachable.add(r) |
|
676 | 676 | heads.add(r) |
|
677 | 677 | if p in heads and p not in stoprevs: |
|
678 | 678 | heads.remove(p) |
|
679 | 679 | |
|
680 | 680 | return [self.node(r) for r in heads] |
|
681 | 681 | |
|
682 | 682 | def children(self, node): |
|
683 | 683 | """find the children of a given node""" |
|
684 | 684 | c = [] |
|
685 | 685 | p = self.rev(node) |
|
686 | 686 | for r in range(p + 1, len(self)): |
|
687 | 687 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] |
|
688 | 688 | if prevs: |
|
689 | 689 | for pr in prevs: |
|
690 | 690 | if pr == p: |
|
691 | 691 | c.append(self.node(r)) |
|
692 | 692 | elif p == nullrev: |
|
693 | 693 | c.append(self.node(r)) |
|
694 | 694 | return c |
|
695 | 695 | |
|
696 | 696 | def descendant(self, start, end): |
|
697 | 697 | if start == nullrev: |
|
698 | 698 | return True |
|
699 | 699 | for i in self.descendants(start): |
|
700 | 700 | if i == end: |
|
701 | 701 | return True |
|
702 | 702 | elif i > end: |
|
703 | 703 | break |
|
704 | 704 | return False |
|
705 | 705 | |
|
706 | 706 | def ancestor(self, a, b): |
|
707 | 707 | """calculate the least common ancestor of nodes a and b""" |
|
708 | 708 | |
|
709 | 709 | # fast path, check if it is a descendant |
|
710 | 710 | a, b = self.rev(a), self.rev(b) |
|
711 | 711 | start, end = sorted((a, b)) |
|
712 | 712 | if self.descendant(start, end): |
|
713 | 713 | return self.node(start) |
|
714 | 714 | |
|
715 | 715 | def parents(rev): |
|
716 | 716 | return [p for p in self.parentrevs(rev) if p != nullrev] |
|
717 | 717 | |
|
718 | 718 | c = ancestor.ancestor(a, b, parents) |
|
719 | 719 | if c is None: |
|
720 | 720 | return nullid |
|
721 | 721 | |
|
722 | 722 | return self.node(c) |
|
723 | 723 | |
|
724 | 724 | def _match(self, id): |
|
725 | 725 | if isinstance(id, (long, int)): |
|
726 | 726 | # rev |
|
727 | 727 | return self.node(id) |
|
728 | 728 | if len(id) == 20: |
|
729 | 729 | # possibly a binary node |
|
730 | 730 | # odds of a binary node being all hex in ASCII are 1 in 10**25 |
|
731 | 731 | try: |
|
732 | 732 | node = id |
|
733 | 733 | self.rev(node) # quick search the index |
|
734 | 734 | return node |
|
735 | 735 | except LookupError: |
|
736 | 736 | pass # may be partial hex id |
|
737 | 737 | try: |
|
738 | 738 | # str(rev) |
|
739 | 739 | rev = int(id) |
|
740 | 740 | if str(rev) != id: |
|
741 | 741 | raise ValueError |
|
742 | 742 | if rev < 0: |
|
743 | 743 | rev = len(self) + rev |
|
744 | 744 | if rev < 0 or rev >= len(self): |
|
745 | 745 | raise ValueError |
|
746 | 746 | return self.node(rev) |
|
747 | 747 | except (ValueError, OverflowError): |
|
748 | 748 | pass |
|
749 | 749 | if len(id) == 40: |
|
750 | 750 | try: |
|
751 | 751 | # a full hex nodeid? |
|
752 | 752 | node = bin(id) |
|
753 | 753 | self.rev(node) |
|
754 | 754 | return node |
|
755 | 755 | except (TypeError, LookupError): |
|
756 | 756 | pass |
|
757 | 757 | |
|
758 | 758 | def _partialmatch(self, id): |
|
759 | 759 | try: |
|
760 | 760 | return self.index.partialmatch(id) |
|
761 | 761 | except RevlogError: |
|
762 | 762 | # parsers.c radix tree lookup gave multiple matches |
|
763 | 763 | raise LookupError(id, self.indexfile, _("ambiguous identifier")) |
|
764 | 764 | except (AttributeError, ValueError): |
|
765 | 765 | # we are pure python, or key was too short to search radix tree |
|
766 | 766 | pass |
|
767 | 767 | |
|
768 | 768 | if id in self._pcache: |
|
769 | 769 | return self._pcache[id] |
|
770 | 770 | |
|
771 | 771 | if len(id) < 40: |
|
772 | 772 | try: |
|
773 | 773 | # hex(node)[:...] |
|
774 | 774 | l = len(id) // 2 # grab an even number of digits |
|
775 | 775 | prefix = bin(id[:l * 2]) |
|
776 | 776 | nl = [e[7] for e in self.index if e[7].startswith(prefix)] |
|
777 | 777 | nl = [n for n in nl if hex(n).startswith(id)] |
|
778 | 778 | if len(nl) > 0: |
|
779 | 779 | if len(nl) == 1: |
|
780 | 780 | self._pcache[id] = nl[0] |
|
781 | 781 | return nl[0] |
|
782 | 782 | raise LookupError(id, self.indexfile, |
|
783 | 783 | _('ambiguous identifier')) |
|
784 | 784 | return None |
|
785 | 785 | except TypeError: |
|
786 | 786 | pass |
|
787 | 787 | |
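The pure-Python fallback in _partialmatch above matches a hex prefix against binary nodes in two steps: convert an even number of leading digits to bytes for a cheap startswith filter, then re-check the candidates against the full hex form so odd-length prefixes still work. A standalone sketch with binascii:

    from binascii import hexlify, unhexlify

    def matchprefix(prefix, nodes):
        l = len(prefix) // 2                 # even number of digits
        head = unhexlify(prefix[:l * 2])
        cand = [n for n in nodes if n.startswith(head)]
        return [n for n in cand if hexlify(n).startswith(prefix)]

    nodes = [unhexlify(b"ab" * 20), unhexlify(b"cd" * 20)]
    assert matchprefix(b"aba", nodes) == [nodes[0]]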
|
788 | 788 | def lookup(self, id): |
|
789 | 789 | """locate a node based on: |
|
790 | 790 | - revision number or str(revision number) |
|
791 | 791 | - nodeid or subset of hex nodeid |
|
792 | 792 | """ |
|
793 | 793 | n = self._match(id) |
|
794 | 794 | if n is not None: |
|
795 | 795 | return n |
|
796 | 796 | n = self._partialmatch(id) |
|
797 | 797 | if n: |
|
798 | 798 | return n |
|
799 | 799 | |
|
800 | 800 | raise LookupError(id, self.indexfile, _('no match found')) |
|
801 | 801 | |
|
802 | 802 | def cmp(self, node, text): |
|
803 | 803 | """compare text with a given file revision |
|
804 | 804 | |
|
805 | 805 | returns True if text is different than what is stored. |
|
806 | 806 | """ |
|
807 | 807 | p1, p2 = self.parents(node) |
|
808 | 808 | return hash(text, p1, p2) != node |
|
809 | 809 | |
|
810 | 810 | def _addchunk(self, offset, data): |
|
811 | 811 | o, d = self._chunkcache |
|
812 | 812 | # try to add to existing cache |
|
813 | 813 | if o + len(d) == offset and len(d) + len(data) < _chunksize: |
|
814 | 814 | self._chunkcache = o, d + data |
|
815 | 815 | else: |
|
816 | 816 | self._chunkcache = offset, data |
|
817 | 817 | |
|
818 | 818 | def _loadchunk(self, offset, length): |
|
819 | 819 | if self._inline: |
|
820 | 820 | df = self.opener(self.indexfile) |
|
821 | 821 | else: |
|
822 | 822 | df = self.opener(self.datafile) |
|
823 | 823 | |
|
824 | 824 | readahead = max(65536, length) |
|
825 | 825 | df.seek(offset) |
|
826 | 826 | d = df.read(readahead) |
|
827 | 827 | df.close() |
|
828 | 828 | self._addchunk(offset, d) |
|
829 | 829 | if readahead > length: |
|
830 | 830 | return util.buffer(d, 0, length) |
|
831 | 831 | return d |
|
832 | 832 | |
|
833 | 833 | def _getchunk(self, offset, length): |
|
834 | 834 | o, d = self._chunkcache |
|
835 | 835 | l = len(d) |
|
836 | 836 | |
|
837 | 837 | # is it in the cache? |
|
838 | 838 | cachestart = offset - o |
|
839 | 839 | cacheend = cachestart + length |
|
840 | 840 | if cachestart >= 0 and cacheend <= l: |
|
841 | 841 | if cachestart == 0 and cacheend == l: |
|
842 | 842 | return d # avoid a copy |
|
843 | 843 | return util.buffer(d, cachestart, cacheend - cachestart) |
|
844 | 844 | |
|
845 | 845 | return self._loadchunk(offset, length) |
|
846 | 846 | |
|
847 | 847 | def _chunkraw(self, startrev, endrev): |
|
848 | 848 | start = self.start(startrev) |
|
849 | 849 | length = self.end(endrev) - start |
|
850 | 850 | if self._inline: |
|
851 | 851 | start += (startrev + 1) * self._io.size |
|
852 | 852 | return self._getchunk(start, length) |
|
853 | 853 | |
|
854 | 854 | def _chunk(self, rev): |
|
855 | 855 | return decompress(self._chunkraw(rev, rev)) |
|
856 | 856 | |
|
857 | 857 | def _chunkbase(self, rev): |
|
858 | 858 | return self._chunk(rev) |
|
859 | 859 | |
|
860 | 860 | def _chunkclear(self): |
|
861 | 861 | self._chunkcache = (0, '') |
|
862 | 862 | |
|
863 | 863 | def deltaparent(self, rev): |
|
864 | 864 | """return deltaparent of the given revision""" |
|
865 | 865 | base = self.index[rev][3] |
|
866 | 866 | if base == rev: |
|
867 | 867 | return nullrev |
|
868 | 868 | elif self._generaldelta: |
|
869 | 869 | return base |
|
870 | 870 | else: |
|
871 | 871 | return rev - 1 |
|
872 | 872 | |
|
873 | 873 | def revdiff(self, rev1, rev2): |
|
874 | 874 | """return or calculate a delta between two revisions""" |
|
875 | 875 | if rev1 != nullrev and self.deltaparent(rev2) == rev1: |
|
876 | 876 | return str(self._chunk(rev2)) |
|
877 | 877 | |
|
878 | 878 | return mdiff.textdiff(self.revision(rev1), |
|
879 | 879 | self.revision(rev2)) |
|
880 | 880 | |
|
881 | 881 | def revision(self, nodeorrev): |
|
882 | 882 | """return an uncompressed revision of a given node or revision |
|
883 | 883 | number. |
|
884 | 884 | """ |
|
885 | 885 | if isinstance(nodeorrev, int): |
|
886 | 886 | rev = nodeorrev |
|
887 | 887 | node = self.node(rev) |
|
888 | 888 | else: |
|
889 | 889 | node = nodeorrev |
|
890 | 890 | rev = None |
|
891 | 891 | |
|
892 | 892 | cachedrev = None |
|
893 | 893 | if node == nullid: |
|
894 | 894 | return "" |
|
895 | 895 | if self._cache: |
|
896 | 896 | if self._cache[0] == node: |
|
897 | 897 | return self._cache[2] |
|
898 | 898 | cachedrev = self._cache[1] |
|
899 | 899 | |
|
900 | 900 | # look up what we need to read |
|
901 | 901 | text = None |
|
902 | 902 | if rev is None: |
|
903 | 903 | rev = self.rev(node) |
|
904 | 904 | |
|
905 | 905 | # check rev flags |
|
906 | 906 | if self.flags(rev) & ~REVIDX_KNOWN_FLAGS: |
|
907 | 907 | raise RevlogError(_('incompatible revision flag %x') % |
|
908 | 908 | (self.flags(rev) & ~REVIDX_KNOWN_FLAGS)) |
|
909 | 909 | |
|
910 | 910 | # build delta chain |
|
911 | 911 | chain = [] |
|
912 | 912 | index = self.index # for performance |
|
913 | 913 | generaldelta = self._generaldelta |
|
914 | 914 | iterrev = rev |
|
915 | 915 | e = index[iterrev] |
|
916 | 916 | while iterrev != e[3] and iterrev != cachedrev: |
|
917 | 917 | chain.append(iterrev) |
|
918 | 918 | if generaldelta: |
|
919 | 919 | iterrev = e[3] |
|
920 | 920 | else: |
|
921 | 921 | iterrev -= 1 |
|
922 | 922 | e = index[iterrev] |
|
923 | 923 | chain.reverse() |
|
924 | 924 | base = iterrev |
|
925 | 925 | |
|
926 | 926 | if iterrev == cachedrev: |
|
927 | 927 | # cache hit |
|
928 | 928 | text = self._cache[2] |
|
929 | 929 | |
|
930 | 930 | # drop cache to save memory |
|
931 | 931 | self._cache = None |
|
932 | 932 | |
|
933 | 933 | self._chunkraw(base, rev) |
|
934 | 934 | if text is None: |
|
935 | 935 | text = str(self._chunkbase(base)) |
|
936 | 936 | |
|
937 | 937 | bins = [self._chunk(r) for r in chain] |
|
938 | 938 | text = mdiff.patches(text, bins) |
|
939 | 939 | |
|
940 | 940 | text = self._checkhash(text, node, rev) |
|
941 | 941 | |
|
942 | 942 | self._cache = (node, rev, text) |
|
943 | 943 | return text |
|
944 | 944 | |
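revision() above reconstructs text by walking index base pointers backwards from the requested rev to the chain base that holds a full text, then patching forward. A sketch of the chain walk for a plain (non-generaldelta) revlog, where each delta applies on top of rev - 1:

    def deltachain(index, rev):
        # index[r][3] is the chain-base field of entry r
        chain = []
        while rev != index[rev][3]:
            chain.append(rev)
            rev -= 1              # plain revlogs delta against rev - 1
        chain.reverse()
        return rev, chain         # rev now holds a full text

    # toy index where only field 3 (the base rev) matters
    toy = [(0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 3)]
    assert deltachain(toy, 2) == (0, [1, 2])
    assert deltachain(toy, 3) == (3, [])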
|
945 | 945 | def _checkhash(self, text, node, rev): |
|
946 | 946 | p1, p2 = self.parents(node) |
|
947 | 947 | if node != hash(text, p1, p2): |
|
948 | 948 | raise RevlogError(_("integrity check failed on %s:%d") |
|
949 | 949 | % (self.indexfile, rev)) |
|
950 | 950 | return text |
|
951 | 951 | |
|
952 | 952 | def checkinlinesize(self, tr, fp=None): |
|
953 | 953 | if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline: |
|
954 | 954 | return |
|
955 | 955 | |
|
956 | 956 | trinfo = tr.find(self.indexfile) |
|
957 | 957 | if trinfo is None: |
|
958 | 958 | raise RevlogError(_("%s not found in the transaction") |
|
959 | 959 | % self.indexfile) |
|
960 | 960 | |
|
961 | 961 | trindex = trinfo[2] |
|
962 | 962 | dataoff = self.start(trindex) |
|
963 | 963 | |
|
964 | 964 | tr.add(self.datafile, dataoff) |
|
965 | 965 | |
|
966 | 966 | if fp: |
|
967 | 967 | fp.flush() |
|
968 | 968 | fp.close() |
|
969 | 969 | |
|
970 | 970 | df = self.opener(self.datafile, 'w') |
|
971 | 971 | try: |
|
972 | 972 | for r in self: |
|
973 | 973 | df.write(self._chunkraw(r, r)) |
|
974 | 974 | finally: |
|
975 | 975 | df.close() |
|
976 | 976 | |
|
977 | 977 | fp = self.opener(self.indexfile, 'w', atomictemp=True) |
|
978 | 978 | self.version &= ~(REVLOGNGINLINEDATA) |
|
979 | 979 | self._inline = False |
|
980 | 980 | for i in self: |
|
981 | 981 | e = self._io.packentry(self.index[i], self.node, self.version, i) |
|
982 | 982 | fp.write(e) |
|
983 | 983 | |
|
984 | 984 | # if we don't call close, the temp file will never replace the |
|
985 | 985 | # real index |
|
986 | 986 | fp.close() |
|
987 | 987 | |
|
988 | 988 | tr.replace(self.indexfile, trindex * self._io.size) |
|
989 | 989 | self._chunkclear() |
|
990 | 990 | |
|
991 | 991 | def addrevision(self, text, transaction, link, p1, p2, cachedelta=None): |
|
992 | 992 | """add a revision to the log |
|
993 | 993 | |
|
994 | 994 | text - the revision data to add |
|
995 | 995 | transaction - the transaction object used for rollback |
|
996 | 996 | link - the linkrev data to add |
|
997 | 997 | p1, p2 - the parent nodeids of the revision |
|
998 | 998 | cachedelta - an optional precomputed delta |
|
999 | 999 | """ |
|
1000 | 1000 | node = hash(text, p1, p2) |
|
1001 | 1001 | if node in self.nodemap: |
|
1002 | 1002 | return node |
|
1003 | 1003 | |
|
1004 | 1004 | dfh = None |
|
1005 | 1005 | if not self._inline: |
|
1006 | 1006 | dfh = self.opener(self.datafile, "a") |
|
1007 | 1007 | ifh = self.opener(self.indexfile, "a+") |
|
1008 | 1008 | try: |
|
1009 | 1009 | return self._addrevision(node, text, transaction, link, p1, p2, |
|
1010 | 1010 | cachedelta, ifh, dfh) |
|
1011 | 1011 | finally: |
|
1012 | 1012 | if dfh: |
|
1013 | 1013 | dfh.close() |
|
1014 | 1014 | ifh.close() |
|
1015 | 1015 | |
|
1016 | 1016 | def _addrevision(self, node, text, transaction, link, p1, p2, |
|
1017 | 1017 | cachedelta, ifh, dfh): |
|
1018 | 1018 | """internal function to add revisions to the log |
|
1019 | 1019 | |
|
1020 | 1020 | see addrevision for argument descriptions. |
|
1021 | 1021 | invariants: |
|
1022 | 1022 | - text is optional (can be None); if not set, cachedelta must be set. |
|
1023 | 1023 | if both are set, they must correspond to each other.
|
1024 | 1024 | """ |
|
1025 | 1025 | btext = [text] |
|
1026 | 1026 | def buildtext(): |
|
1027 | 1027 | if btext[0] is not None: |
|
1028 | 1028 | return btext[0] |
|
1029 | 1029 | # flush any pending writes here so we can read it in revision |
|
1030 | 1030 | if dfh: |
|
1031 | 1031 | dfh.flush() |
|
1032 | 1032 | ifh.flush() |
|
1033 | 1033 | basetext = self.revision(self.node(cachedelta[0])) |
|
1034 | 1034 | btext[0] = mdiff.patch(basetext, cachedelta[1]) |
|
1035 | 1035 | chk = hash(btext[0], p1, p2) |
|
1036 | 1036 | if chk != node: |
|
1037 | 1037 | raise RevlogError(_("consistency error in delta")) |
|
1038 | 1038 | return btext[0] |
|
1039 | 1039 | |
|
1040 | 1040 | def builddelta(rev): |
|
1041 | 1041 | # can we use the cached delta? |
|
1042 | 1042 | if cachedelta and cachedelta[0] == rev: |
|
1043 | 1043 | delta = cachedelta[1] |
|
1044 | 1044 | else: |
|
1045 | 1045 | t = buildtext() |
|
1046 | 1046 | ptext = self.revision(self.node(rev)) |
|
1047 | 1047 | delta = mdiff.textdiff(ptext, t) |
|
1048 | 1048 | data = compress(delta) |
|
1049 | 1049 | l = len(data[1]) + len(data[0]) |
|
1050 | 1050 | if basecache[0] == rev: |
|
1051 | 1051 | chainbase = basecache[1] |
|
1052 | 1052 | else: |
|
1053 | 1053 | chainbase = self.chainbase(rev) |
|
1054 | 1054 | dist = l + offset - self.start(chainbase) |
|
1055 | 1055 | if self._generaldelta: |
|
1056 | 1056 | base = rev |
|
1057 | 1057 | else: |
|
1058 | 1058 | base = chainbase |
|
1059 | 1059 | return dist, l, data, base, chainbase |
|
1060 | 1060 | |
|
1061 | 1061 | curr = len(self) |
|
1062 | 1062 | prev = curr - 1 |
|
1063 | 1063 | base = chainbase = curr |
|
1064 | 1064 | offset = self.end(prev) |
|
1065 | 1065 | flags = 0 |
|
1066 | 1066 | d = None |
|
1067 | 1067 | basecache = self._basecache |
|
1068 | 1068 | p1r, p2r = self.rev(p1), self.rev(p2) |
|
1069 | 1069 | |
|
1070 | 1070 | # should we try to build a delta? |
|
1071 | 1071 | if prev != nullrev: |
|
1072 | 1072 | if self._generaldelta: |
|
1073 | 1073 | if p1r >= basecache[1]: |
|
1074 | 1074 | d = builddelta(p1r) |
|
1075 | 1075 | elif p2r >= basecache[1]: |
|
1076 | 1076 | d = builddelta(p2r) |
|
1077 | 1077 | else: |
|
1078 | 1078 | d = builddelta(prev) |
|
1079 | 1079 | else: |
|
1080 | 1080 | d = builddelta(prev) |
|
1081 | 1081 | dist, l, data, base, chainbase = d |
|
1082 | 1082 | |
|
1083 | 1083 | # full versions are inserted when the needed deltas |
|
1084 | 1084 | # become comparable to the uncompressed text |
|
1085 | 1085 | if text is None: |
|
1086 | 1086 | textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]), |
|
1087 | 1087 | cachedelta[1]) |
|
1088 | 1088 | else: |
|
1089 | 1089 | textlen = len(text) |
|
1090 | 1090 | if d is None or dist > textlen * 2: |
|
1091 | 1091 | text = buildtext() |
|
1092 | 1092 | data = compress(text) |
|
1093 | 1093 | l = len(data[1]) + len(data[0]) |
|
1094 | 1094 | base = chainbase = curr |
|
1095 | 1095 | |
|
1096 | 1096 | e = (offset_type(offset, flags), l, textlen, |
|
1097 | 1097 | base, link, p1r, p2r, node) |
|
1098 | 1098 | self.index.insert(-1, e) |
|
1099 | 1099 | self.nodemap[node] = curr |
|
1100 | 1100 | |
|
1101 | 1101 | entry = self._io.packentry(e, self.node, self.version, curr) |
|
1102 | 1102 | if not self._inline: |
|
1103 | 1103 | transaction.add(self.datafile, offset) |
|
1104 | 1104 | transaction.add(self.indexfile, curr * len(entry)) |
|
1105 | 1105 | if data[0]: |
|
1106 | 1106 | dfh.write(data[0]) |
|
1107 | 1107 | dfh.write(data[1]) |
|
1108 | 1108 | dfh.flush() |
|
1109 | 1109 | ifh.write(entry) |
|
1110 | 1110 | else: |
|
1111 | 1111 | offset += curr * self._io.size |
|
1112 | 1112 | transaction.add(self.indexfile, offset, curr) |
|
1113 | 1113 | ifh.write(entry) |
|
1114 | 1114 | ifh.write(data[0]) |
|
1115 | 1115 | ifh.write(data[1]) |
|
1116 | 1116 | self.checkinlinesize(transaction, ifh) |
|
1117 | 1117 | |
|
1118 | 1118 | if type(text) == str: # only accept immutable objects |
|
1119 | 1119 | self._cache = (node, curr, text) |
|
1120 | 1120 | self._basecache = (curr, chainbase) |
|
1121 | 1121 | return node |
|
1122 | 1122 | |
|
1123 | 1123 | def group(self, nodelist, bundler, reorder=None): |
|
1124 | 1124 | """Calculate a delta group, yielding a sequence of changegroup chunks |
|
1125 | 1125 | (strings). |
|
1126 | 1126 | |
|
1127 | 1127 | Given a list of changeset revs, return a set of deltas and |
|
1128 | 1128 | metadata corresponding to nodes. The first delta is |
|
1129 | 1129 | first parent(nodelist[0]) -> nodelist[0], the receiver is |
|
1130 | 1130 | guaranteed to have this parent as it has all history before |
|
1131 | 1131 | these changesets. In the case firstparent is nullrev, the
|
1132 | 1132 | changegroup starts with a full revision. |
|
1133 | 1133 | """ |
|
1134 | 1134 | |
|
1135 | 1135 | # if we don't have any revisions touched by these changesets, bail |
|
1136 | 1136 | if len(nodelist) == 0: |
|
1137 | 1137 | yield bundler.close() |
|
1138 | 1138 | return |
|
1139 | 1139 | |
|
1140 | 1140 | # for generaldelta revlogs, we linearize the revs; this will both be |
|
1141 | 1141 | # much quicker and generate a much smaller bundle |
|
1142 | 1142 | if (self._generaldelta and reorder is not False) or reorder: |
|
1143 | 1143 | dag = dagutil.revlogdag(self) |
|
1144 | 1144 | revs = set(self.rev(n) for n in nodelist) |
|
1145 | 1145 | revs = dag.linearize(revs) |
|
1146 | 1146 | else: |
|
1147 | 1147 | revs = sorted([self.rev(n) for n in nodelist]) |
|
1148 | 1148 | |
|
1149 | 1149 | # add the parent of the first rev |
|
1150 | 1150 | p = self.parentrevs(revs[0])[0] |
|
1151 | 1151 | revs.insert(0, p) |
|
1152 | 1152 | |
|
1153 | 1153 | # build deltas |
|
1154 | 1154 | for r in xrange(len(revs) - 1): |
|
1155 | 1155 | prev, curr = revs[r], revs[r + 1] |
|
1156 | 1156 | for c in bundler.revchunk(self, curr, prev): |
|
1157 | 1157 | yield c |
|
1158 | 1158 | |
|
1159 | 1159 | yield bundler.close() |
|
1160 | 1160 | |
|
1161 | 1161 | def addgroup(self, bundle, linkmapper, transaction): |
|
1162 | 1162 | """ |
|
1163 | 1163 | add a delta group |
|
1164 | 1164 | |
|
1165 | 1165 | given a set of deltas, add them to the revision log. the |
|
1166 | 1166 | first delta is against its parent, which should be in our |
|
1167 | 1167 | log, the rest are against the previous delta. |
|
1168 | 1168 | """ |
|
1169 | 1169 | |
|
1170 | 1170 | # track the base of the current delta log |
|
1171 | 1171 | content = [] |
|
1172 | 1172 | node = None |
|
1173 | 1173 | |
|
1174 | 1174 | r = len(self) |
|
1175 | 1175 | end = 0 |
|
1176 | 1176 | if r: |
|
1177 | 1177 | end = self.end(r - 1) |
|
1178 | 1178 | ifh = self.opener(self.indexfile, "a+") |
|
1179 | 1179 | isize = r * self._io.size |
|
1180 | 1180 | if self._inline: |
|
1181 | 1181 | transaction.add(self.indexfile, end + isize, r) |
|
1182 | 1182 | dfh = None |
|
1183 | 1183 | else: |
|
1184 | 1184 | transaction.add(self.indexfile, isize, r) |
|
1185 | 1185 | transaction.add(self.datafile, end) |
|
1186 | 1186 | dfh = self.opener(self.datafile, "a") |
|
1187 | 1187 | |
|
1188 | 1188 | try: |
|
1189 | 1189 | # loop through our set of deltas |
|
1190 | 1190 | chain = None |
|
1191 | 1191 | while True: |
|
1192 | 1192 | chunkdata = bundle.deltachunk(chain) |
|
1193 | 1193 | if not chunkdata: |
|
1194 | 1194 | break |
|
1195 | 1195 | node = chunkdata['node'] |
|
1196 | 1196 | p1 = chunkdata['p1'] |
|
1197 | 1197 | p2 = chunkdata['p2'] |
|
1198 | 1198 | cs = chunkdata['cs'] |
|
1199 | 1199 | deltabase = chunkdata['deltabase'] |
|
1200 | 1200 | delta = chunkdata['delta'] |
|
1201 | 1201 | |
|
1202 | 1202 | content.append(node) |
|
1203 | 1203 | |
|
1204 | 1204 | link = linkmapper(cs) |
|
1205 | 1205 | if node in self.nodemap: |
|
1206 | 1206 | # this can happen if two branches make the same change |
|
1207 | 1207 | chain = node |
|
1208 | 1208 | continue |
|
1209 | 1209 | |
|
1210 | 1210 | for p in (p1, p2): |
|
1211 |      | if not p in self.nodemap:

     | 1211 | if p not in self.nodemap:
|
1212 | 1212 | raise LookupError(p, self.indexfile, |
|
1213 | 1213 | _('unknown parent')) |
|
1214 | 1214 | |
|
1215 | 1215 | if deltabase not in self.nodemap: |
|
1216 | 1216 | raise LookupError(deltabase, self.indexfile, |
|
1217 | 1217 | _('unknown delta base')) |
|
1218 | 1218 | |
|
1219 | 1219 | baserev = self.rev(deltabase) |
|
1220 | 1220 | chain = self._addrevision(node, None, transaction, link, |
|
1221 | 1221 | p1, p2, (baserev, delta), ifh, dfh) |
|
1222 | 1222 | if not dfh and not self._inline: |
|
1223 | 1223 | # addrevision switched from inline to conventional |
|
1224 | 1224 | # reopen the index |
|
1225 | 1225 | ifh.close() |
|
1226 | 1226 | dfh = self.opener(self.datafile, "a") |
|
1227 | 1227 | ifh = self.opener(self.indexfile, "a") |
|
1228 | 1228 | finally: |
|
1229 | 1229 | if dfh: |
|
1230 | 1230 | dfh.close() |
|
1231 | 1231 | ifh.close() |
|
1232 | 1232 | |
|
1233 | 1233 | return content |
|
1234 | 1234 | |
|
1235 | 1235 | def strip(self, minlink, transaction): |
|
1236 | 1236 | """truncate the revlog on the first revision with a linkrev >= minlink |
|
1237 | 1237 | |
|
1238 | 1238 | This function is called when we're stripping revision minlink and |
|
1239 | 1239 | its descendants from the repository. |
|
1240 | 1240 | |
|
1241 | 1241 | We have to remove all revisions with linkrev >= minlink, because |
|
1242 | 1242 | the equivalent changelog revisions will be renumbered after the |
|
1243 | 1243 | strip. |
|
1244 | 1244 | |
|
1245 | 1245 | So we truncate the revlog on the first of these revisions, and |
|
1246 | 1246 | trust that the caller has saved the revisions that shouldn't be |
|
1247 | 1247 | removed and that it'll re-add them after this truncation. |
|
1248 | 1248 | """ |
|
1249 | 1249 | if len(self) == 0: |
|
1250 | 1250 | return |
|
1251 | 1251 | |
|
1252 | 1252 | for rev in self: |
|
1253 | 1253 | if self.index[rev][4] >= minlink: |
|
1254 | 1254 | break |
|
1255 | 1255 | else: |
|
1256 | 1256 | return |
|
1257 | 1257 | |
|
1258 | 1258 | # first truncate the files on disk |
|
1259 | 1259 | end = self.start(rev) |
|
1260 | 1260 | if not self._inline: |
|
1261 | 1261 | transaction.add(self.datafile, end) |
|
1262 | 1262 | end = rev * self._io.size |
|
1263 | 1263 | else: |
|
1264 | 1264 | end += rev * self._io.size |
|
1265 | 1265 | |
|
1266 | 1266 | transaction.add(self.indexfile, end) |
|
1267 | 1267 | |
|
1268 | 1268 | # then reset internal state in memory to forget those revisions |
|
1269 | 1269 | self._cache = None |
|
1270 | 1270 | self._chunkclear() |
|
1271 | 1271 | for x in xrange(rev, len(self)): |
|
1272 | 1272 | del self.nodemap[self.node(x)] |
|
1273 | 1273 | |
|
1274 | 1274 | del self.index[rev:-1] |
|
1275 | 1275 | |
|
1276 | 1276 | def checksize(self): |
|
1277 | 1277 | expected = 0 |
|
1278 | 1278 | if len(self): |
|
1279 | 1279 | expected = max(0, self.end(len(self) - 1)) |
|
1280 | 1280 | |
|
1281 | 1281 | try: |
|
1282 | 1282 | f = self.opener(self.datafile) |
|
1283 | 1283 | f.seek(0, 2) |
|
1284 | 1284 | actual = f.tell() |
|
1285 | 1285 | f.close() |
|
1286 | 1286 | dd = actual - expected |
|
1287 | 1287 | except IOError, inst: |
|
1288 | 1288 | if inst.errno != errno.ENOENT: |
|
1289 | 1289 | raise |
|
1290 | 1290 | dd = 0 |
|
1291 | 1291 | |
|
1292 | 1292 | try: |
|
1293 | 1293 | f = self.opener(self.indexfile) |
|
1294 | 1294 | f.seek(0, 2) |
|
1295 | 1295 | actual = f.tell() |
|
1296 | 1296 | f.close() |
|
1297 | 1297 | s = self._io.size |
|
1298 | 1298 | i = max(0, actual // s) |
|
1299 | 1299 | di = actual - (i * s) |
|
1300 | 1300 | if self._inline: |
|
1301 | 1301 | databytes = 0 |
|
1302 | 1302 | for r in self: |
|
1303 | 1303 | databytes += max(0, self.length(r)) |
|
1304 | 1304 | dd = 0 |
|
1305 | 1305 | di = actual - len(self) * s - databytes |
|
1306 | 1306 | except IOError, inst: |
|
1307 | 1307 | if inst.errno != errno.ENOENT: |
|
1308 | 1308 | raise |
|
1309 | 1309 | di = 0 |
|
1310 | 1310 | |
|
1311 | 1311 | return (dd, di) |
|
1312 | 1312 | |
|
1313 | 1313 | def files(self): |
|
1314 | 1314 | res = [self.indexfile] |
|
1315 | 1315 | if not self._inline: |
|
1316 | 1316 | res.append(self.datafile) |
|
1317 | 1317 | return res |
@@ -1,395 +1,395 b''
|
1 | 1 | # templatefilters.py - common template expansion filters
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2008 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import cgi, re, os, time, urllib |
|
9 | 9 | import encoding, node, util |
|
10 | 10 | import hbisect |
|
11 | 11 | |
|
12 | 12 | def addbreaks(text): |
|
13 | 13 | """:addbreaks: Any text. Add an XHTML "<br />" tag before the end of |
|
14 | 14 | every line except the last. |
|
15 | 15 | """ |
|
16 | 16 | return text.replace('\n', '<br/>\n') |
|
17 | 17 | |
|
18 | 18 | agescales = [("year", 3600 * 24 * 365), |
|
19 | 19 | ("month", 3600 * 24 * 30), |
|
20 | 20 | ("week", 3600 * 24 * 7), |
|
21 | 21 | ("day", 3600 * 24), |
|
22 | 22 | ("hour", 3600), |
|
23 | 23 | ("minute", 60), |
|
24 | 24 | ("second", 1)] |
|
25 | 25 | |
|
26 | 26 | def age(date): |
|
27 | 27 | """:age: Date. Returns a human-readable date/time difference between the |
|
28 | 28 | given date/time and the current date/time. |
|
29 | 29 | """ |
|
30 | 30 | |
|
31 | 31 | def plural(t, c): |
|
32 | 32 | if c == 1: |
|
33 | 33 | return t |
|
34 | 34 | return t + "s" |
|
35 | 35 | def fmt(t, c): |
|
36 | 36 | return "%d %s" % (c, plural(t, c)) |
|
37 | 37 | |
|
38 | 38 | now = time.time() |
|
39 | 39 | then = date[0] |
|
40 | 40 | future = False |
|
41 | 41 | if then > now: |
|
42 | 42 | future = True |
|
43 | 43 | delta = max(1, int(then - now)) |
|
44 | 44 | if delta > agescales[0][1] * 30: |
|
45 | 45 | return 'in the distant future' |
|
46 | 46 | else: |
|
47 | 47 | delta = max(1, int(now - then)) |
|
48 | 48 | if delta > agescales[0][1] * 2: |
|
49 | 49 | return util.shortdate(date) |
|
50 | 50 | |
|
51 | 51 | for t, s in agescales: |
|
52 | 52 | n = delta // s |
|
53 | 53 | if n >= 2 or s == 1: |
|
54 | 54 | if future: |
|
55 | 55 | return '%s from now' % fmt(t, n) |
|
56 | 56 | return '%s ago' % fmt(t, n) |
|
57 | 57 | |
|
58 | 58 | def basename(path): |
|
59 | 59 | """:basename: Any text. Treats the text as a path, and returns the last |
|
60 | 60 | component of the path after splitting by the path separator |
|
61 | 61 | (ignoring trailing separators). For example, "foo/bar/baz" becomes |
|
62 | 62 | "baz" and "foo/bar//" becomes "bar". |
|
63 | 63 | """ |
|
64 | 64 | return os.path.basename(path) |
|
65 | 65 | |
|
66 | 66 | def datefilter(text): |
|
67 | 67 | """:date: Date. Returns a date in a Unix date format, including the |
|
68 | 68 | timezone: "Mon Sep 04 15:13:13 2006 0700". |
|
69 | 69 | """ |
|
70 | 70 | return util.datestr(text) |
|
71 | 71 | |
|
72 | 72 | def domain(author): |
|
73 | 73 | """:domain: Any text. Finds the first string that looks like an email |
|
74 | 74 | address, and extracts just the domain component. Example: ``User |
|
75 | 75 | <user@example.com>`` becomes ``example.com``. |
|
76 | 76 | """ |
|
77 | 77 | f = author.find('@') |
|
78 | 78 | if f == -1: |
|
79 | 79 | return '' |
|
80 | 80 | author = author[f + 1:] |
|
81 | 81 | f = author.find('>') |
|
82 | 82 | if f >= 0: |
|
83 | 83 | author = author[:f] |
|
84 | 84 | return author |
|
85 | 85 | |
|
86 | 86 | def email(text): |
|
87 | 87 | """:email: Any text. Extracts the first string that looks like an email |
|
88 | 88 | address. Example: ``User <user@example.com>`` becomes |
|
89 | 89 | ``user@example.com``. |
|
90 | 90 | """ |
|
91 | 91 | return util.email(text) |
|
92 | 92 | |
|
93 | 93 | def escape(text): |
|
94 | 94 | """:escape: Any text. Replaces the special XML/XHTML characters "&", "<" |
|
95 | 95 | and ">" with XML entities. |
|
96 | 96 | """ |
|
97 | 97 | return cgi.escape(text, True) |
|
98 | 98 | |
|
99 | 99 | para_re = None |
|
100 | 100 | space_re = None |
|
101 | 101 | |
|
102 | 102 | def fill(text, width): |
|
103 | 103 | '''fill many paragraphs.''' |
|
104 | 104 | global para_re, space_re |
|
105 | 105 | if para_re is None: |
|
106 | 106 | para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M) |
|
107 | 107 | space_re = re.compile(r' +') |
|
108 | 108 | |
|
109 | 109 | def findparas(): |
|
110 | 110 | start = 0 |
|
111 | 111 | while True: |
|
112 | 112 | m = para_re.search(text, start) |
|
113 | 113 | if not m: |
|
114 | 114 | uctext = unicode(text[start:], encoding.encoding) |
|
115 | 115 | w = len(uctext) |
|
116 | 116 | while 0 < w and uctext[w - 1].isspace(): |
|
117 | 117 | w -= 1 |
|
118 | 118 | yield (uctext[:w].encode(encoding.encoding), |
|
119 | 119 | uctext[w:].encode(encoding.encoding)) |
|
120 | 120 | break |
|
121 | 121 | yield text[start:m.start(0)], m.group(1) |
|
122 | 122 | start = m.end(1) |
|
123 | 123 | |
|
124 | 124 | return "".join([space_re.sub(' ', util.wrap(para, width=width)) + rest |
|
125 | 125 | for para, rest in findparas()]) |
|
126 | 126 | |
|
127 | 127 | def fill68(text): |
|
128 | 128 | """:fill68: Any text. Wraps the text to fit in 68 columns.""" |
|
129 | 129 | return fill(text, 68) |
|
130 | 130 | |
|
131 | 131 | def fill76(text): |
|
132 | 132 | """:fill76: Any text. Wraps the text to fit in 76 columns.""" |
|
133 | 133 | return fill(text, 76) |
|
134 | 134 | |
|
135 | 135 | def firstline(text): |
|
136 | 136 | """:firstline: Any text. Returns the first line of text.""" |
|
137 | 137 | try: |
|
138 | 138 | return text.splitlines(True)[0].rstrip('\r\n') |
|
139 | 139 | except IndexError: |
|
140 | 140 | return '' |
|
141 | 141 | |
|
142 | 142 | def hexfilter(text): |
|
143 | 143 | """:hex: Any text. Convert a binary Mercurial node identifier into |
|
144 | 144 | its long hexadecimal representation. |
|
145 | 145 | """ |
|
146 | 146 | return node.hex(text) |
|
147 | 147 | |
|
148 | 148 | def hgdate(text): |
|
149 | 149 | """:hgdate: Date. Returns the date as a pair of numbers: "1157407993 |
|
150 | 150 | 25200" (Unix timestamp, timezone offset). |
|
151 | 151 | """ |
|
152 | 152 | return "%d %d" % text |
|
153 | 153 | |
|
154 | 154 | def isodate(text): |
|
155 | 155 | """:isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 |
|
156 | 156 | +0200". |
|
157 | 157 | """ |
|
158 | 158 | return util.datestr(text, '%Y-%m-%d %H:%M %1%2') |
|
159 | 159 | |
|
160 | 160 | def isodatesec(text): |
|
161 | 161 | """:isodatesec: Date. Returns the date in ISO 8601 format, including |
|
162 | 162 | seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date |
|
163 | 163 | filter. |
|
164 | 164 | """ |
|
165 | 165 | return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2') |
|
166 | 166 | |
|
167 | 167 | def indent(text, prefix): |
|
168 | 168 | '''indent each non-empty line of text after first with prefix.''' |
|
169 | 169 | lines = text.splitlines() |
|
170 | 170 | num_lines = len(lines) |
|
171 | 171 | endswithnewline = text[-1:] == '\n' |
|
172 | 172 | def indenter(): |
|
173 | 173 | for i in xrange(num_lines): |
|
174 | 174 | l = lines[i] |
|
175 | 175 | if i and l.strip(): |
|
176 | 176 | yield prefix |
|
177 | 177 | yield l |
|
178 | 178 | if i < num_lines - 1 or endswithnewline: |
|
179 | 179 | yield '\n' |
|
180 | 180 | return "".join(indenter()) |
|
181 | 181 | |
|
182 | 182 | def json(obj): |
|
183 | 183 | if obj is None or obj is False or obj is True: |
|
184 | 184 | return {None: 'null', False: 'false', True: 'true'}[obj] |
|
185 | 185 | elif isinstance(obj, int) or isinstance(obj, float): |
|
186 | 186 | return str(obj) |
|
187 | 187 | elif isinstance(obj, str): |
|
188 | 188 | u = unicode(obj, encoding.encoding, 'replace') |
|
189 | 189 | return '"%s"' % jsonescape(u) |
|
190 | 190 | elif isinstance(obj, unicode): |
|
191 | 191 | return '"%s"' % jsonescape(obj) |
|
192 | 192 | elif util.safehasattr(obj, 'keys'): |
|
193 | 193 | out = [] |
|
194 | 194 | for k, v in obj.iteritems(): |
|
195 | 195 | s = '%s: %s' % (json(k), json(v)) |
|
196 | 196 | out.append(s) |
|
197 | 197 | return '{' + ', '.join(out) + '}' |
|
198 | 198 | elif util.safehasattr(obj, '__iter__'): |
|
199 | 199 | out = [] |
|
200 | 200 | for i in obj: |
|
201 | 201 | out.append(json(i)) |
|
202 | 202 | return '[' + ', '.join(out) + ']' |
|
203 | 203 | else: |
|
204 | 204 | raise TypeError('cannot encode type %s' % obj.__class__.__name__) |
|
205 | 205 | |
|
206 | 206 | def _uescape(c): |
|
207 | 207 | if ord(c) < 0x80: |
|
208 | 208 | return c |
|
209 | 209 | else: |
|
210 | 210 | return '\\u%04x' % ord(c) |
|
211 | 211 | |
|
212 | 212 | _escapes = [ |
|
213 | 213 | ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'), |
|
214 | 214 | ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'), |
|
215 | 215 | ] |
|
216 | 216 | |
|
217 | 217 | def jsonescape(s): |
|
218 | 218 | for k, v in _escapes: |
|
219 | 219 | s = s.replace(k, v) |
|
220 | 220 | return ''.join(_uescape(c) for c in s) |
|
221 | 221 | |
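The json/jsonescape pair above hand-rolls a JSON encoder. A minimal standalone sketch of the same scheme (simplified: byte strings only, no unicode or encoding handling; not the hg API) shows the output format:

    escapes = [('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'),
               ('\n', '\\n'), ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b')]

    def esc(s):
        # apply the same escape table as jsonescape above
        for k, v in escapes:
            s = s.replace(k, v)
        return s

    def enc(obj):
        # same dispatch order as the json filter above
        if obj is None or obj is False or obj is True:
            return {None: 'null', False: 'false', True: 'true'}[obj]
        elif isinstance(obj, (int, float)):
            return str(obj)
        elif isinstance(obj, str):
            return '"%s"' % esc(obj)
        elif hasattr(obj, 'keys'):
            return '{%s}' % ', '.join('%s: %s' % (enc(k), enc(v))
                                      for k, v in sorted(obj.items()))
        elif hasattr(obj, '__iter__'):
            return '[%s]' % ', '.join(enc(i) for i in obj)
        raise TypeError('cannot encode %r' % obj)

    print enc({'rev': 0, 'tags': ['tip']})  # {"rev": 0, "tags": ["tip"]}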
|
222 | 222 | def localdate(text): |
|
223 | 223 | """:localdate: Date. Converts a date to local date.""" |
|
224 | 224 | return (text[0], util.makedate()[1]) |
|
225 | 225 | |
|
226 | 226 | def nonempty(str): |
|
227 | 227 | """:nonempty: Any text. Returns '(none)' if the string is empty.""" |
|
228 | 228 | return str or "(none)" |
|
229 | 229 | |
|
230 | 230 | def obfuscate(text): |
|
231 | 231 | """:obfuscate: Any text. Returns the input text rendered as a sequence of |
|
232 | 232 | XML entities. |
|
233 | 233 | """ |
|
234 | 234 | text = unicode(text, encoding.encoding, 'replace') |
|
235 | 235 | return ''.join(['&#%d;' % ord(c) for c in text]) |
|
236 | 236 | |
|
237 | 237 | def permissions(flags): |
|
238 | 238 | if "l" in flags: |
|
239 | 239 | return "lrwxrwxrwx" |
|
240 | 240 | if "x" in flags: |
|
241 | 241 | return "-rwxr-xr-x" |
|
242 | 242 | return "-rw-r--r--" |
|
243 | 243 | |
|
244 | 244 | def person(author): |
|
245 | 245 | """:person: Any text. Returns the name before an email address, |
|
246 | 246 | interpreting it as per RFC 5322. |
|
247 | 247 | |
|
248 | 248 | >>> person('foo@bar') |
|
249 | 249 | 'foo' |
|
250 | 250 | >>> person('Foo Bar <foo@bar>') |
|
251 | 251 | 'Foo Bar' |
|
252 | 252 | >>> person('"Foo Bar" <foo@bar>') |
|
253 | 253 | 'Foo Bar' |
|
254 | 254 | >>> person('"Foo \"buz\" Bar" <foo@bar>') |
|
255 | 255 | 'Foo "buz" Bar' |
|
256 | 256 | >>> # The following are invalid, but do exist in real-life |
|
257 | 257 | ... |
|
258 | 258 | >>> person('Foo "buz" Bar <foo@bar>') |
|
259 | 259 | 'Foo "buz" Bar' |
|
260 | 260 | >>> person('"Foo Bar <foo@bar>') |
|
261 | 261 | 'Foo Bar' |
|
262 | 262 | """ |
|
263 |     | if not '@' in author:

    | 263 | if '@' not in author:
|
264 | 264 | return author |
|
265 | 265 | f = author.find('<') |
|
266 | 266 | if f != -1: |
|
267 | 267 | return author[:f].strip(' "').replace('\\"', '"') |
|
268 | 268 | f = author.find('@') |
|
269 | 269 | return author[:f].replace('.', ' ') |
|
270 | 270 | |
|
271 | 271 | def rfc3339date(text): |
|
272 | 272 | """:rfc3339date: Date. Returns a date using the Internet date format |
|
273 | 273 | specified in RFC 3339: "2009-08-18T13:00:13+02:00". |
|
274 | 274 | """ |
|
275 | 275 | return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2") |
|
276 | 276 | |
|
277 | 277 | def rfc822date(text): |
|
278 | 278 | """:rfc822date: Date. Returns a date using the same format used in email |
|
279 | 279 | headers: "Tue, 18 Aug 2009 13:00:13 +0200". |
|
280 | 280 | """ |
|
281 | 281 | return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2") |
|
282 | 282 | |
|
283 | 283 | def short(text): |
|
284 | 284 | """:short: Changeset hash. Returns the short form of a changeset hash, |
|
285 | 285 | i.e. a 12 hexadecimal digit string. |
|
286 | 286 | """ |
|
287 | 287 | return text[:12] |
|
288 | 288 | |
|
289 | 289 | def shortbisect(text): |
|
290 | 290 | """:shortbisect: Any text. Treats `text` as a bisection status, and |
|
291 | 291 | returns a single character representing the status (G: good, B: bad,

292 | 292 | S: skipped, U: untested, I: ignored). Returns a single space if `text`
|
293 | 293 | is not a valid bisection status. |
|
294 | 294 | """ |
|
295 | 295 | return hbisect.shortlabel(text) or ' ' |
|
296 | 296 | |
|
297 | 297 | def shortdate(text): |
|
298 | 298 | """:shortdate: Date. Returns a date like "2006-09-18".""" |
|
299 | 299 | return util.shortdate(text) |
|
300 | 300 | |
|
301 | 301 | def stringescape(text): |
|
302 | 302 | return text.encode('string_escape') |
|
303 | 303 | |
|
304 | 304 | def stringify(thing): |
|
305 | 305 | """:stringify: Any type. Turns the value into text by converting values into |
|
306 | 306 | text and concatenating them. |
|
307 | 307 | """ |
|
308 | 308 | if util.safehasattr(thing, '__iter__') and not isinstance(thing, str): |
|
309 | 309 | return "".join([stringify(t) for t in thing if t is not None]) |
|
310 | 310 | return str(thing) |
|
311 | 311 | |
|
312 | 312 | def strip(text): |
|
313 | 313 | """:strip: Any text. Strips all leading and trailing whitespace.""" |
|
314 | 314 | return text.strip() |
|
315 | 315 | |
|
316 | 316 | def stripdir(text): |
|
317 | 317 | """:stripdir: Treat the text as path and strip a directory level, if |
|
318 | 318 | possible. For example, "foo" and "foo/bar" becomes "foo". |
|
319 | 319 | """ |
|
320 | 320 | dir = os.path.dirname(text) |
|
321 | 321 | if dir == "": |
|
322 | 322 | return os.path.basename(text) |
|
323 | 323 | else: |
|
324 | 324 | return dir |
|
325 | 325 | |
|
326 | 326 | def tabindent(text): |
|
327 | 327 | """:tabindent: Any text. Returns the text, with every line except the |
|
328 | 328 | first starting with a tab character. |
|
329 | 329 | """ |
|
330 | 330 | return indent(text, '\t') |
|
331 | 331 | |
|
332 | 332 | def urlescape(text): |
|
333 | 333 | """:urlescape: Any text. Escapes all "special" characters. For example, |
|
334 | 334 | "foo bar" becomes "foo%20bar". |
|
335 | 335 | """ |
|
336 | 336 | return urllib.quote(text) |
|
337 | 337 | |
|
338 | 338 | def userfilter(text): |
|
339 | 339 | """:user: Any text. Returns a short representation of a user name or email |
|
340 | 340 | address.""" |
|
341 | 341 | return util.shortuser(text) |
|
342 | 342 | |
|
343 | 343 | def emailuser(text): |
|
344 | 344 | """:emailuser: Any text. Returns the user portion of an email address.""" |
|
345 | 345 | return util.emailuser(text) |
|
346 | 346 | |
|
347 | 347 | def xmlescape(text): |
|
348 | 348 | text = (text |
|
349 | 349 | .replace('&', '&amp;')

350 | 350 | .replace('<', '&lt;')

351 | 351 | .replace('>', '&gt;')

352 | 352 | .replace('"', '&quot;')

353 | 353 | .replace("'", '&#39;')) # &apos; invalid in HTML
|
354 | 354 | return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text) |
|
355 | 355 | |
|
356 | 356 | filters = { |
|
357 | 357 | "addbreaks": addbreaks, |
|
358 | 358 | "age": age, |
|
359 | 359 | "basename": basename, |
|
360 | 360 | "date": datefilter, |
|
361 | 361 | "domain": domain, |
|
362 | 362 | "email": email, |
|
363 | 363 | "escape": escape, |
|
364 | 364 | "fill68": fill68, |
|
365 | 365 | "fill76": fill76, |
|
366 | 366 | "firstline": firstline, |
|
367 | 367 | "hex": hexfilter, |
|
368 | 368 | "hgdate": hgdate, |
|
369 | 369 | "isodate": isodate, |
|
370 | 370 | "isodatesec": isodatesec, |
|
371 | 371 | "json": json, |
|
372 | 372 | "jsonescape": jsonescape, |
|
373 | 373 | "localdate": localdate, |
|
374 | 374 | "nonempty": nonempty, |
|
375 | 375 | "obfuscate": obfuscate, |
|
376 | 376 | "permissions": permissions, |
|
377 | 377 | "person": person, |
|
378 | 378 | "rfc3339date": rfc3339date, |
|
379 | 379 | "rfc822date": rfc822date, |
|
380 | 380 | "short": short, |
|
381 | 381 | "shortbisect": shortbisect, |
|
382 | 382 | "shortdate": shortdate, |
|
383 | 383 | "stringescape": stringescape, |
|
384 | 384 | "stringify": stringify, |
|
385 | 385 | "strip": strip, |
|
386 | 386 | "stripdir": stripdir, |
|
387 | 387 | "tabindent": tabindent, |
|
388 | 388 | "urlescape": urlescape, |
|
389 | 389 | "user": userfilter, |
|
390 | 390 | "emailuser": emailuser, |
|
391 | 391 | "xmlescape": xmlescape, |
|
392 | 392 | } |
|
393 | 393 | |
|
394 | 394 | # tell hggettext to extract docstrings from these functions: |
|
395 | 395 | i18nfunctions = filters.values() |
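This table is what the template engine consults when it sees "{value|filter1|filter2}" in a template. A hedged usage sketch (assumes a Mercurial source tree on sys.path; the sample text is invented):

    from mercurial import templatefilters

    text = '  fix the bug\nsecond line\n'
    for name in ('strip', 'firstline'):          # apply a filter chain by hand
        text = templatefilters.filters[name](text)
    print text                                   # -> fix the bug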
@@ -1,392 +1,392 b'' | |||
|
1 | 1 | # templater.py - template expansion for output |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | 9 | import sys, os |
|
10 | 10 | import util, config, templatefilters, parser, error |
|
11 | 11 | |
|
12 | 12 | # template parsing |
|
13 | 13 | |
|
14 | 14 | elements = { |
|
15 | 15 | "(": (20, ("group", 1, ")"), ("func", 1, ")")), |
|
16 | 16 | ",": (2, None, ("list", 2)), |
|
17 | 17 | "|": (5, None, ("|", 5)), |
|
18 | 18 | "%": (6, None, ("%", 6)), |
|
19 | 19 | ")": (0, None, None), |
|
20 | 20 | "symbol": (0, ("symbol",), None), |
|
21 | 21 | "string": (0, ("string",), None), |
|
22 | 22 | "end": (0, None, None), |
|
23 | 23 | } |
|
24 | 24 | |
|
25 | 25 | def tokenizer(data): |
|
26 | 26 | program, start, end = data |
|
27 | 27 | pos = start |
|
28 | 28 | while pos < end: |
|
29 | 29 | c = program[pos] |
|
30 | 30 | if c.isspace(): # skip inter-token whitespace |
|
31 | 31 | pass |
|
32 | 32 | elif c in "(,)%|": # handle simple operators |
|
33 | 33 | yield (c, None, pos) |
|
34 | 34 | elif (c in '"\'' or c == 'r' and |
|
35 | 35 | program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings |
|
36 | 36 | if c == 'r': |
|
37 | 37 | pos += 1 |
|
38 | 38 | c = program[pos] |
|
39 | 39 | decode = lambda x: x |
|
40 | 40 | else: |
|
41 | 41 | decode = lambda x: x.decode('string-escape') |
|
42 | 42 | pos += 1 |
|
43 | 43 | s = pos |
|
44 | 44 | while pos < end: # find closing quote |
|
45 | 45 | d = program[pos] |
|
46 | 46 | if d == '\\': # skip over escaped characters |
|
47 | 47 | pos += 2 |
|
48 | 48 | continue |
|
49 | 49 | if d == c: |
|
50 | 50 | yield ('string', decode(program[s:pos]), s) |
|
51 | 51 | break |
|
52 | 52 | pos += 1 |
|
53 | 53 | else: |
|
54 | 54 | raise error.ParseError(_("unterminated string"), s) |
|
55 | 55 | elif c.isalnum() or c in '_': |
|
56 | 56 | s = pos |
|
57 | 57 | pos += 1 |
|
58 | 58 | while pos < end: # find end of symbol |
|
59 | 59 | d = program[pos] |
|
60 | 60 | if not (d.isalnum() or d == "_"): |
|
61 | 61 | break |
|
62 | 62 | pos += 1 |
|
63 | 63 | sym = program[s:pos] |
|
64 | 64 | yield ('symbol', sym, s) |
|
65 | 65 | pos -= 1 |
|
66 | 66 | elif c == '}': |
|
67 | 67 | pos += 1 |
|
68 | 68 | break |
|
69 | 69 | else: |
|
70 | 70 | raise error.ParseError(_("syntax error"), pos) |
|
71 | 71 | pos += 1 |
|
72 | 72 | yield ('end', None, pos) |
|
73 | 73 | |
|
74 | 74 | def compiletemplate(tmpl, context): |
|
75 | 75 | parsed = [] |
|
76 | 76 | pos, stop = 0, len(tmpl) |
|
77 | 77 | p = parser.parser(tokenizer, elements) |
|
78 | 78 | |
|
79 | 79 | while pos < stop: |
|
80 | 80 | n = tmpl.find('{', pos) |
|
81 | 81 | if n < 0: |
|
82 | 82 | parsed.append(("string", tmpl[pos:])) |
|
83 | 83 | break |
|
84 | 84 | if n > 0 and tmpl[n - 1] == '\\': |
|
85 | 85 | # escaped |
|
86 | 86 | parsed.append(("string", tmpl[pos:n - 1] + "{")) |
|
87 | 87 | pos = n + 1 |
|
88 | 88 | continue |
|
89 | 89 | if n > pos: |
|
90 | 90 | parsed.append(("string", tmpl[pos:n])) |
|
91 | 91 | |
|
92 | 92 | pd = [tmpl, n + 1, stop] |
|
93 | 93 | parseres, pos = p.parse(pd) |
|
94 | 94 | parsed.append(parseres) |
|
95 | 95 | |
|
96 | 96 | return [compileexp(e, context) for e in parsed] |
|
97 | 97 | |
|
98 | 98 | def compileexp(exp, context): |
|
99 | 99 | t = exp[0] |
|
100 | 100 | if t in methods: |
|
101 | 101 | return methods[t](exp, context) |
|
102 | 102 | raise error.ParseError(_("unknown method '%s'") % t) |
|
103 | 103 | |
|
104 | 104 | # template evaluation |
|
105 | 105 | |
|
106 | 106 | def getsymbol(exp): |
|
107 | 107 | if exp[0] == 'symbol': |
|
108 | 108 | return exp[1] |
|
109 | 109 | raise error.ParseError(_("expected a symbol")) |
|
110 | 110 | |
|
111 | 111 | def getlist(x): |
|
112 | 112 | if not x: |
|
113 | 113 | return [] |
|
114 | 114 | if x[0] == 'list': |
|
115 | 115 | return getlist(x[1]) + [x[2]] |
|
116 | 116 | return [x] |
|
117 | 117 | |
|
118 | 118 | def getfilter(exp, context): |
|
119 | 119 | f = getsymbol(exp) |
|
120 | 120 | if f not in context._filters: |
|
121 | 121 | raise error.ParseError(_("unknown function '%s'") % f) |
|
122 | 122 | return context._filters[f] |
|
123 | 123 | |
|
124 | 124 | def gettemplate(exp, context): |
|
125 | 125 | if exp[0] == 'string': |
|
126 | 126 | return compiletemplate(exp[1], context) |
|
127 | 127 | if exp[0] == 'symbol': |
|
128 | 128 | return context._load(exp[1]) |
|
129 | 129 | raise error.ParseError(_("expected template specifier")) |
|
130 | 130 | |
|
131 | 131 | def runstring(context, mapping, data): |
|
132 | 132 | return data |
|
133 | 133 | |
|
134 | 134 | def runsymbol(context, mapping, key): |
|
135 | 135 | v = mapping.get(key) |
|
136 | 136 | if v is None: |
|
137 | 137 | v = context._defaults.get(key, '') |
|
138 | 138 | if util.safehasattr(v, '__call__'): |
|
139 | 139 | return v(**mapping) |
|
140 | 140 | return v |
|
141 | 141 | |
|
142 | 142 | def buildfilter(exp, context): |
|
143 | 143 | func, data = compileexp(exp[1], context) |
|
144 | 144 | filt = getfilter(exp[2], context) |
|
145 | 145 | return (runfilter, (func, data, filt)) |
|
146 | 146 | |
|
147 | 147 | def runfilter(context, mapping, data): |
|
148 | 148 | func, data, filt = data |
|
149 | 149 | return filt(func(context, mapping, data)) |
|
150 | 150 | |
|
151 | 151 | def buildmap(exp, context): |
|
152 | 152 | func, data = compileexp(exp[1], context) |
|
153 | 153 | ctmpl = gettemplate(exp[2], context) |
|
154 | 154 | return (runmap, (func, data, ctmpl)) |
|
155 | 155 | |
|
156 | 156 | def runmap(context, mapping, data): |
|
157 | 157 | func, data, ctmpl = data |
|
158 | 158 | d = func(context, mapping, data) |
|
159 | 159 | lm = mapping.copy() |
|
160 | 160 | |
|
161 | 161 | for i in d: |
|
162 | 162 | if isinstance(i, dict): |
|
163 | 163 | lm.update(i) |
|
164 | 164 | for f, d in ctmpl: |
|
165 | 165 | yield f(context, lm, d) |
|
166 | 166 | else: |
|
167 | 167 | # v is not an iterable of dicts, this happen when 'key' |
|
168 | 168 | # has been fully expanded already and format is useless. |
|
169 | 169 | # If so, return the expanded value. |
|
170 | 170 | yield i |
|
171 | 171 | |
|
172 | 172 | def buildfunc(exp, context): |
|
173 | 173 | n = getsymbol(exp[1]) |
|
174 | 174 | args = [compileexp(x, context) for x in getlist(exp[2])] |
|
175 | 175 | if n in funcs: |
|
176 | 176 | f = funcs[n] |
|
177 | 177 | return (f, args) |
|
178 | 178 | if n in context._filters: |
|
179 | 179 | if len(args) != 1: |
|
180 | 180 | raise error.ParseError(_("filter %s expects one argument") % n) |
|
181 | 181 | f = context._filters[n] |
|
182 | 182 | return (runfilter, (args[0][0], args[0][1], f)) |
|
183 | 183 | |
|
184 | 184 | methods = { |
|
185 | 185 | "string": lambda e, c: (runstring, e[1]), |
|
186 | 186 | "symbol": lambda e, c: (runsymbol, e[1]), |
|
187 | 187 | "group": lambda e, c: compileexp(e[1], c), |
|
188 | 188 | # ".": buildmember, |
|
189 | 189 | "|": buildfilter, |
|
190 | 190 | "%": buildmap, |
|
191 | 191 | "func": buildfunc, |
|
192 | 192 | } |
|
193 | 193 | |
|
194 | 194 | funcs = { |
|
195 | 195 | } |
|
196 | 196 | |
|
197 | 197 | # template engine |
|
198 | 198 | |
|
199 | 199 | path = ['templates', '../templates'] |
|
200 | 200 | stringify = templatefilters.stringify |
|
201 | 201 | |
|
202 | 202 | def _flatten(thing): |
|
203 | 203 | '''yield a single stream from a possibly nested set of iterators''' |
|
204 | 204 | if isinstance(thing, str): |
|
205 | 205 | yield thing |
|
206 | 206 | elif not util.safehasattr(thing, '__iter__'): |
|
207 | 207 | if thing is not None: |
|
208 | 208 | yield str(thing) |
|
209 | 209 | else: |
|
210 | 210 | for i in thing: |
|
211 | 211 | if isinstance(i, str): |
|
212 | 212 | yield i |
|
213 | 213 | elif not util.safehasattr(i, '__iter__'): |
|
214 | 214 | if i is not None: |
|
215 | 215 | yield str(i) |
|
216 | 216 | elif i is not None: |
|
217 | 217 | for j in _flatten(i): |
|
218 | 218 | yield j |
|
219 | 219 | |
|
220 | 220 | def parsestring(s, quoted=True): |
|
221 | 221 | '''parse a string using simple c-like syntax. |
|
222 | 222 | string must be in quotes if quoted is True.''' |
|
223 | 223 | if quoted: |
|
224 | 224 | if len(s) < 2 or s[0] != s[-1]: |
|
225 | 225 | raise SyntaxError(_('unmatched quotes')) |
|
226 | 226 | return s[1:-1].decode('string_escape') |
|
227 | 227 | |
|
228 | 228 | return s.decode('string_escape') |
|
229 | 229 | |
|
230 | 230 | class engine(object): |
|
231 | 231 | '''template expansion engine. |
|
232 | 232 | |
|
233 | 233 | template expansion works like this. a map file contains key=value |
|
234 | 234 | pairs. if value is quoted, it is treated as string. otherwise, it |
|
235 | 235 | is treated as name of template file. |
|
236 | 236 | |
|
237 | 237 | templater is asked to expand a key in map. it looks up key, and |
|
238 | 238 | looks for strings like this: {foo}. it expands {foo} by looking up |
|
239 | 239 | foo in map, and substituting it. expansion is recursive: it stops |
|
240 | 240 | when there is no more {foo} to replace. |
|
241 | 241 | |
|
242 | 242 | expansion also allows formatting and filtering. |
|
243 | 243 | |
|
244 | 244 | format uses key to expand each item in list. syntax is |
|
245 | 245 | {key%format}. |
|
246 | 246 | |
|
247 | 247 | filter uses function to transform value. syntax is |
|
248 | 248 | {key|filter1|filter2|...}.''' |
|
249 | 249 | |
|
250 | 250 | def __init__(self, loader, filters={}, defaults={}): |
|
251 | 251 | self._loader = loader |
|
252 | 252 | self._filters = filters |
|
253 | 253 | self._defaults = defaults |
|
254 | 254 | self._cache = {} |
|
255 | 255 | |
|
256 | 256 | def _load(self, t): |
|
257 | 257 | '''load, parse, and cache a template''' |
|
258 | 258 | if t not in self._cache: |
|
259 | 259 | self._cache[t] = compiletemplate(self._loader(t), self) |
|
260 | 260 | return self._cache[t] |
|
261 | 261 | |
|
262 | 262 | def process(self, t, mapping): |
|
263 | 263 | '''Perform expansion. t is name of map element to expand. |
|
264 | 264 | mapping contains added elements for use during expansion. Returns a

265 | 265 | generator.'''
|
266 | 266 | return _flatten(func(self, mapping, data) for func, data in |
|
267 | 267 | self._load(t)) |
|
268 | 268 | |
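A hedged sketch of driving the engine directly (assumes a Mercurial source tree on sys.path; the loader, template text, and mapping are invented for illustration):

    from mercurial import templater, templatefilters

    load = lambda name: '{desc|firstline}\n'     # stand-in loader
    e = templater.engine(load, templatefilters.filters)
    print ''.join(e.process('changeset', {'desc': 'fix the bug\n\ndetails'})),
    # -> fix the bug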
|
269 | 269 | engines = {'default': engine} |
|
270 | 270 | |
|
271 | 271 | class templater(object): |
|
272 | 272 | |
|
273 | 273 | def __init__(self, mapfile, filters={}, defaults={}, cache={}, |
|
274 | 274 | minchunk=1024, maxchunk=65536): |
|
275 | 275 | '''set up template engine. |
|
276 | 276 | mapfile is name of file to read map definitions from. |
|
277 | 277 | filters is dict of functions. each transforms a value into another. |
|
278 | 278 | defaults is dict of default map definitions.''' |
|
279 | 279 | self.mapfile = mapfile or 'template' |
|
280 | 280 | self.cache = cache.copy() |
|
281 | 281 | self.map = {} |
|
282 | 282 | self.base = (mapfile and os.path.dirname(mapfile)) or '' |
|
283 | 283 | self.filters = templatefilters.filters.copy() |
|
284 | 284 | self.filters.update(filters) |
|
285 | 285 | self.defaults = defaults |
|
286 | 286 | self.minchunk, self.maxchunk = minchunk, maxchunk |
|
287 | 287 | self.ecache = {} |
|
288 | 288 | |
|
289 | 289 | if not mapfile: |
|
290 | 290 | return |
|
291 | 291 | if not os.path.exists(mapfile): |
|
292 | 292 | raise util.Abort(_('style not found: %s') % mapfile) |
|
293 | 293 | |
|
294 | 294 | conf = config.config() |
|
295 | 295 | conf.read(mapfile) |
|
296 | 296 | |
|
297 | 297 | for key, val in conf[''].items(): |
|
298 | 298 | if val[0] in "'\"": |
|
299 | 299 | try: |
|
300 | 300 | self.cache[key] = parsestring(val) |
|
301 | 301 | except SyntaxError, inst: |
|
302 | 302 | raise SyntaxError('%s: %s' % |
|
303 | 303 | (conf.source('', key), inst.args[0])) |
|
304 | 304 | else: |
|
305 | 305 | val = 'default', val |
|
306 | 306 | if ':' in val[1]: |
|
307 | 307 | val = val[1].split(':', 1) |
|
308 | 308 | self.map[key] = val[0], os.path.join(self.base, val[1]) |
|
309 | 309 | |
|
310 | 310 | def __contains__(self, key): |
|
311 | 311 | return key in self.cache or key in self.map |
|
312 | 312 | |
|
313 | 313 | def load(self, t): |
|
314 | 314 | '''Get the template for the given template name. Use a local cache.''' |
|
315 |     | if not t in self.cache:

    | 315 | if t not in self.cache:
|
316 | 316 | try: |
|
317 | 317 | self.cache[t] = util.readfile(self.map[t][1]) |
|
318 | 318 | except KeyError, inst: |
|
319 | 319 | raise util.Abort(_('"%s" not in template map') % inst.args[0]) |
|
320 | 320 | except IOError, inst: |
|
321 | 321 | raise IOError(inst.args[0], _('template file %s: %s') % |
|
322 | 322 | (self.map[t][1], inst.args[1])) |
|
323 | 323 | return self.cache[t] |
|
324 | 324 | |
|
325 | 325 | def __call__(self, t, **mapping): |
|
326 | 326 | ttype = t in self.map and self.map[t][0] or 'default' |
|
327 | 327 | if ttype not in self.ecache: |
|
328 | 328 | self.ecache[ttype] = engines[ttype](self.load, |
|
329 | 329 | self.filters, self.defaults) |
|
330 | 330 | proc = self.ecache[ttype] |
|
331 | 331 | |
|
332 | 332 | stream = proc.process(t, mapping) |
|
333 | 333 | if self.minchunk: |
|
334 | 334 | stream = util.increasingchunks(stream, min=self.minchunk, |
|
335 | 335 | max=self.maxchunk) |
|
336 | 336 | return stream |
|
337 | 337 | |
|
338 | 338 | def templatepath(name=None): |
|
339 | 339 | '''return location of template file (if name is given and found) or a

340 | 340 | list of template directories otherwise.'''
|
341 | 341 | normpaths = [] |
|
342 | 342 | |
|
343 | 343 | # executable version (py2exe) doesn't support __file__ |
|
344 | 344 | if util.mainfrozen(): |
|
345 | 345 | module = sys.executable |
|
346 | 346 | else: |
|
347 | 347 | module = __file__ |
|
348 | 348 | for f in path: |
|
349 | 349 | if f.startswith('/'): |
|
350 | 350 | p = f |
|
351 | 351 | else: |
|
352 | 352 | fl = f.split('/') |
|
353 | 353 | p = os.path.join(os.path.dirname(module), *fl) |
|
354 | 354 | if name: |
|
355 | 355 | p = os.path.join(p, name) |
|
356 | 356 | if name and os.path.exists(p): |
|
357 | 357 | return os.path.normpath(p) |
|
358 | 358 | elif os.path.isdir(p): |
|
359 | 359 | normpaths.append(os.path.normpath(p)) |
|
360 | 360 | |
|
361 | 361 | return normpaths |
|
362 | 362 | |
|
363 | 363 | def stylemap(styles, paths=None): |
|
364 | 364 | """Return path to mapfile for a given style. |
|
365 | 365 | |
|
366 | 366 | Searches for the mapfile in the following locations:
|
367 | 367 | 1. templatepath/style/map |
|
368 | 368 | 2. templatepath/map-style |
|
369 | 369 | 3. templatepath/map |
|
370 | 370 | """ |
|
371 | 371 | |
|
372 | 372 | if paths is None: |
|
373 | 373 | paths = templatepath() |
|
374 | 374 | elif isinstance(paths, str): |
|
375 | 375 | paths = [paths] |
|
376 | 376 | |
|
377 | 377 | if isinstance(styles, str): |
|
378 | 378 | styles = [styles] |
|
379 | 379 | |
|
380 | 380 | for style in styles: |
|
381 | 381 | if not style: |
|
382 | 382 | continue |
|
383 | 383 | locations = [os.path.join(style, 'map'), 'map-' + style] |
|
384 | 384 | locations.append('map') |
|
385 | 385 | |
|
386 | 386 | for path in paths: |
|
387 | 387 | for location in locations: |
|
388 | 388 | mapfile = os.path.join(path, location) |
|
389 | 389 | if os.path.isfile(mapfile): |
|
390 | 390 | return style, mapfile |
|
391 | 391 | |
|
392 | 392 | raise RuntimeError("No hgweb templates found in %r" % paths) |
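End to end, a one-line map file plus the templater class expand a keyed template. A hedged sketch (assumes a Mercurial source tree on sys.path; writes a throwaway map file):

    import os
    from mercurial import templater

    open('map.tmp', 'w').write("changeset = 'rev {rev}: {desc|firstline}\\n'\n")
    t = templater.templater('map.tmp')
    print ''.join(t('changeset', rev=0, desc='fix the bug\n\ndetails')),
    # -> rev 0: fix the bug
    os.unlink('map.tmp')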
@@ -1,55 +1,55 b'' | |||
|
1 | 1 | import os |
|
2 | 2 | from mercurial import hg, ui |
|
3 | 3 | from mercurial.scmutil import walkrepos |
|
4 | 4 | from mercurial.util import checklink |
|
5 | 5 | from os import mkdir, chdir |
|
6 | 6 | from os.path import join as pjoin |
|
7 | 7 | |
|
8 | 8 | u = ui.ui() |
|
9 | 9 | sym = checklink('.') |
|
10 | 10 | |
|
11 | 11 | hg.repository(u, 'top1', create=1) |
|
12 | 12 | mkdir('subdir') |
|
13 | 13 | chdir('subdir') |
|
14 | 14 | hg.repository(u, 'sub1', create=1) |
|
15 | 15 | mkdir('subsubdir') |
|
16 | 16 | chdir('subsubdir') |
|
17 | 17 | hg.repository(u, 'subsub1', create=1) |
|
18 | 18 | chdir(os.path.pardir) |
|
19 | 19 | if sym: |
|
20 | 20 | os.symlink(os.path.pardir, 'circle') |
|
21 | 21 | os.symlink(pjoin('subsubdir', 'subsub1'), 'subsub1') |
|
22 | 22 | |
|
23 | 23 | def runtest(): |
|
24 | 24 | reposet = frozenset(walkrepos('.', followsym=True)) |
|
25 | 25 | if sym and (len(reposet) != 3): |
|
26 | 26 | print "reposet = %r" % (reposet,) |
|
27 | 27 | print ("Found %d repositories when I should have found 3" |
|
28 | 28 | % (len(reposet),)) |
|
29 | 29 | if (not sym) and (len(reposet) != 2): |
|
30 | 30 | print "reposet = %r" % (reposet,) |
|
31 | 31 | print ("Found %d repositories when I should have found 2" |
|
32 | 32 | % (len(reposet),)) |
|
33 | 33 | sub1set = frozenset((pjoin('.', 'sub1'), |
|
34 | 34 | pjoin('.', 'circle', 'subdir', 'sub1'))) |
|
35 | 35 | if len(sub1set & reposet) != 1: |
|
36 | 36 | print "sub1set = %r" % (sub1set,) |
|
37 | 37 | print "reposet = %r" % (reposet,) |
|
38 | 38 | print "sub1set and reposet should have exactly one path in common." |
|
39 | 39 | sub2set = frozenset((pjoin('.', 'subsub1'), |
|
40 | 40 | pjoin('.', 'subsubdir', 'subsub1'))) |
|
41 | 41 | if len(sub2set & reposet) != 1: |
|
42 | 42 | print "sub2set = %r" % (sub2set,) |
|
43 | 43 | print "reposet = %r" % (reposet,) |
|
44 | 44 | print "sub1set and reposet should have exactly one path in common." |
|
45 | 45 | sub3 = pjoin('.', 'circle', 'top1') |
|
46 |    | if sym and not sub3 in reposet:

   | 46 | if sym and sub3 not in reposet:
|
47 | 47 | print "reposet = %r" % (reposet,) |
|
48 | 48 | print "Symbolic links are supported and %s is not in reposet" % (sub3,) |
|
49 | 49 | |
|
50 | 50 | runtest() |
|
51 | 51 | if sym: |
|
52 | 52 | # Simulate not having symlinks. |
|
53 | 53 | del os.path.samestat |
|
54 | 54 | sym = False |
|
55 | 55 | runtest() |
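For context, the API under test: walkrepos yields the repositories found under a root, and followsym=True also descends through symlinked directories while guarding against cycles. A hedged sketch (assumes a Mercurial source tree on sys.path):

    from mercurial.scmutil import walkrepos

    for root in sorted(walkrepos('.', followsym=True)):
        print root                               # one line per repository found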