##// END OF EJS Templates
check-code: there must also be whitespace between ')' and operator...
Mads Kiilerich -
r18054:b35e3364 default
parent child Browse files
Show More
@@ -1,454 +1,454 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # check-code - a style and portability checker for Mercurial
4 4 #
5 5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 import re, glob, os, sys
11 11 import keyword
12 12 import optparse
13 13
def repquote(m):
    """Blank out the contents of a quoted string, keeping its layout.

    Word characters become 'x', any remaining non-whitespace becomes 'o',
    and the original quote characters are preserved, so line lengths and
    line breaks are unchanged.
    """
    quote = m.group('quote')
    body = re.sub(r"[^\s\nx]", "o", re.sub(r"\w", "x", m.group('text')))
    return quote + body + quote
18 18
def reppython(m):
    """Blank out Python comments and string literals.

    A matched comment is replaced by an equal-length run of '#';
    otherwise the match is a quoted string and is handed to repquote().
    """
    comment = m.group('comment')
    return "#" * len(comment) if comment else repquote(m)
24 24
def repcomment(m):
    """Replace a comment's text with an equal-length run of '#'."""
    leading, body = m.group(1), m.group(2)
    return leading + len(body) * "#"
27 27
def repccomment(m):
    """Blank the body of a C comment, keeping newlines and leading indent."""
    body = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return "%s%s*/" % (m.group(1), body)
31 31
def repcallspaces(m):
    """Strip leading whitespace from continuation lines inside a call."""
    tail = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + tail
35 35
def repinclude(m):
    """Replace the target of an #include with a fixed placeholder."""
    return "%s<foo>" % m.group(1)
38 38
def rephere(m):
    """Blank the body of a shell here-document with 'x' characters."""
    return m.group(1) + re.sub(r"\S", "x", m.group(2))
42 42
43 43
# Patterns applied to shell test scripts.  First list: errors; second
# list: warning-level checks.  Each entry is (regex, message).
testpats = [
  [
    (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
    (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
    (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
    (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
    (r'echo -n', "don't use 'echo -n', use printf"),
    (r'(^| )wc[^|]*$\n(?!.*\(re\))', "filter wc output"),
    (r'head -c', "don't use 'head -c', use 'dd'"),
    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
    # message uses an escaped backslash so "\N" is not parsed as a
    # (malformed) named-unicode escape on Python 3; the runtime value is
    # identical on Python 2.
    (r'printf.*\\([1-9]|0\d)', "don't use 'printf \\NNN', use Python"),
    (r'printf.*\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
     "use egrep for extended grep syntax"),
    (r'/bin/', "don't use explicit paths for tools"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'export.*=', "don't export and assign at once"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
    (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
    (r'^alias\b.*=', "don't use alias, use a function"),
    (r'if\s*!', "don't use '!' to negate exit status"),
    (r'/dev/u?random', "don't use entropy, use /dev/zero"),
    (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
    (r'^( *)\t', "don't use tabs to indent"),
  ],
  # warnings
  [
    (r'^function', "don't use 'function', use old style"),
    (r'^diff.*-\w*N', "don't use 'diff -N'"),
    (r'\$PWD', "don't use $PWD, use `pwd`"),
    (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
  ]
]
85 85
# Filters applied to shell test scripts before checking: blank out
# comment text and here-document bodies so the patterns above do not
# fire on them.
testfilters = [
    (r"( *)(#([^\n]*\S)?)", repcomment),
    (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]
90 90
# Unified tests indent commands with two spaces and a "$ " prompt; the
# two-space runs below were collapsed to one by the HTML scrape this file
# came from and are restored here.
uprefix = r"^  \$ "
utestpats = [
  [
    (r'^(\S.*||  [$>] .*)[ \t]\n', "trailing whitespace on non-output"),
    (uprefix + r'.*\|\s*sed[^|>\n]*\n',
     "use regex test output patterns instead of sed"),
    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
    (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
    (uprefix + r'.*\|\| echo.*(fail|error)',
     "explicit exit code checks unnecessary"),
    (uprefix + r'set -e', "don't use set -e"),
    (uprefix + r'\s', "don't indent commands, use > for continued lines"),
    (r'^  saved backup bundle to \$TESTTMP.*\.hg$',
     "use (glob) to match Windows paths too"),
  ],
  # warnings
  []
]
109 109
# Every shell-script check also applies to the command lines of unified
# tests ("  $ cmd" / "  > continuation"); wrap each pattern with the
# two-space prompt prefix and append it to the matching severity list.
for i in [0, 1]:
    for p, m in testpats[i]:
        if p.startswith(r'^'):
            p = r"^  [$>] (%s)" % p[1:]
        else:
            p = r"^  [$>] .*(%s)" % p
        utestpats[i].append((p, m))
117 117
# Filters for unified tests: here-documents (whose body lines carry the
# two-space "  > " continuation prefix, collapsed to one space by the
# scrape and restored here) and comments are blanked before checking.
utestfilters = [
    (r"<<(\S+)((.|\n)*?\n  > \1)", rephere),
    (r"( *)(#([^\n]*\S)?)", repcomment),
]
122 122
# Patterns applied to Python source.  The diff artifact that left both
# the old (r'\w,\w' / r'\w[+/*\-<>]\w') and new entries in the scraped
# text is resolved here: only the corrected versions, which also require
# whitespace after ')', are kept.
pypats = [
  [
    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
     "tuple parameter unpacking not available in Python 3+"),
    (r'lambda\s*\(.*,.*\)',
     "tuple parameter unpacking not available in Python 3+"),
    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
    (r'^\s*\t', "don't use tabs"),
    (r'\S;\s*\n', "semicolon"),
    (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"),
    (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"),
    (r'(\w|\)),\w', "missing whitespace after ,"),
    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
    (r'^\s+\w+=\w+[^,)\n]$', "missing whitespace in assignment"),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
     r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
     r'((?:\n|\1\s.*\n)+?)\1finally:',
     'no yield inside try/finally in Python 2.4'),
    (r'.{81}', "line too long"),
    (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
#    (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
#     "don't use underbars in identifiers"),
    (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
     "don't use camelcase in identifiers"),
    (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
     "linebreak after :"),
    (r'class\s[^( \n]+:', "old-style class, use class foo(object)"),
    (r'class\s[^( \n]+\(\):',
     "class foo() not available in Python 2.4, use class foo(object)"),
    (r'\b(%s)\(' % '|'.join(keyword.kwlist),
     "Python keyword is not a function"),
    (r',]', "unneeded trailing ',' in list"),
#    (r'class\s[A-Z][^\(]*\((?!Exception)',
#     "don't capitalize non-exception classes"),
#    (r'in range\(', "use xrange"),
#    (r'^\s*print\s+', "avoid using print in core and extensions"),
    (r'[\x80-\xff]', "non-ASCII character literal"),
    (r'("\')\.format\(', "str.format() not available in Python 2.4"),
    (r'^\s*with\s+', "with not available in Python 2.4"),
    (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
    (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
    (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
    (r'(?<!def)\s+(any|all|format)\(',
     "any/all/format not available in Python 2.4"),
    (r'(?<!def)\s+(callable)\(',
     "callable not available in Python 3, use getattr(f, '__call__', None)"),
    (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
     "gratuitous whitespace after Python keyword"),
    (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
#    (r'\s\s=', "gratuitous whitespace before ="),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
     "missing whitespace around operator"),
    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
     "wrong whitespace around ="),
    (r'raise Exception', "don't raise generic exceptions"),
    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
    (r' [=!]=\s+(True|False|None)',
     "comparison with singleton, use 'is' or 'is not' instead"),
    (r'^\s*(while|if) [01]:',
     "use True/False for constant Boolean expression"),
    (r'(?:(?<!def)\s+|\()hasattr',
     'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
    (r'opener\([^)]*\).read\(',
     "use opener.read() instead"),
    (r'BaseException', 'not in Python 2.4, use Exception'),
    (r'os\.path\.relpath', 'os.path.relpath is not in Python 2.5'),
    (r'opener\([^)]*\).write\(',
     "use opener.write() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.read\(',
     "use util.readfile() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.write\(',
     "use util.readfile() instead"),
    (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'[\s\(](open|file)\([^)]*\)\.',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'(?i)descendent', "the proper spelling is descendAnt"),
    (r'\.debug\(\_', "don't mark debug messages for translation"),
    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
    (r'^\s*except\s*:', "warning: naked except clause", r'#.*re-raises'),
    # "(    )*" (groups of four spaces) was collapsed by the scrape;
    # restored so only 1-3 extra spaces after a 4-space multiple trigger.
    (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
     "missing _() in ui message (use () to hide false-positives)"),
  ],
  # warnings
  [
  ]
]
221 221
# Blank comments and string-literal bodies in Python source before the
# checks run (the (?x) flag makes the layout whitespace insignificant).
pyfilters = [
    (r"""(?msx)(?P<comment>\#.*?$)|
         ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
          (?P<text>(([^\\]|\\.)*?))
          (?P=quote))""", reppython),
]
228 228
# Patterns applied to C source.  First list: errors; second: warnings.
cpats = [
  [
    (r'//', "don't use //-style comments"),
    (r'^  ', "don't use spaces to indent"),
    (r'\S\t', "don't use tabs except for indent"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
    (r'.{81}', "line too long"),
    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
    (r'return\(', "return is not a function"),
    (r' ;', "no space before ;"),
    (r'\w+\* \w+', "use int *foo, not int* foo"),
    (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
    (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
    (r'^#\s+\w', "use #foo, not # foo"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'^\s*#import\b', "use only #include in standard C code"),
  ],
  # warnings
  []
]
251 251
# Filters for C source: blank comments, string literals, include targets
# and continuation-line indentation inside calls.
cfilters = [
    (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
    (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
    (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
    (r'(\()([^)]+\))', repcallspaces),
]
258 258
# Layering rule: util must not reach up into ui.
inutilpats = [
    [
        (r'\bui\.', "don't use ui in util"),
    ],
    # warnings
    []
]
266 266
# Layering rule: revlog must not reach up into repo.
inrevlogpats = [
    [
        (r'\brepo\.', "don't use repo in revlog"),
    ],
    # warnings
    []
]
274 274
# One (name, filename-matching regex, filters, patterns) tuple per kind
# of source file the checker knows about.
checks = [
    ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
    ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
    ('c', r'.*\.c$', cfilters, cpats),
    ('unified test', r'.*\.t$', utestfilters, utestpats),
    ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
     inrevlogpats),
    ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
     inutilpats),
]
285 285
class norepeatlogger(object):
    # Logger that quotes each offending source line at most once:
    # consecutive errors for the same (file, line) repeat only the message.
    def __init__(self):
        self._lastseen = None  # (fname, lineno, line) of the last report

    def log(self, fname, lineno, line, msg, blame):
        """print error related a to given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        """
        msgid = fname, lineno, line
        if msgid != self._lastseen:
            # new location: print the "file:line" header and the line itself
            if blame:
                print "%s:%d (%s):" % (fname, lineno, blame)
            else:
                print "%s:%d:" % (fname, lineno)
            print " > %s" % line
            self._lastseen = msgid
        print " " + msg

# shared default logger used by checkfile() when no logfunc is given
_defaultlogger = norepeatlogger()
312 312
def getblame(f):
    """Return annotation data for file f as a list of (line, user, rev).

    Shells out to 'hg annotate -un'; each output line looks like
    'user rev: content' and is split accordingly.
    """
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        meta, content = raw.split(':', 1)
        user, rev = meta.split()
        annotated.append((content[1:-1], user, rev))
    return annotated
320 320
def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False, lineno=True):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
              logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of error to display before aborting.
             Set to false (default) to report all errors

    return True if no error is found, False otherwise.
    """
    blamecache = None
    result = True
    # run every check whose filename pattern matches f
    for name, match, filters, pats in checks:
        if debug:
            print name, f
        fc = 0  # error count for this check, compared against maxerr
        if not re.match(match, f):
            if debug:
                print "Skipping %s for %s it doesn't match %s" % (
                       name, match, f)
            continue
        fp = open(f)
        # 'pre' keeps the raw text for reporting; 'post' gets filtered
        pre = post = fp.read()
        fp.close()
        # the marker is split so this file doesn't disable itself
        if "no-" + "check-code" in pre:
            if debug:
                print "Skipping %s for %s it has no- and check-code" % (
                       name, f)
            break
        for p, r in filters:
            post = re.sub(p, r, post)
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version

        if debug:
            print "Checking %s for %s" % (name, f)

        prelines = None
        errors = []
        for pat in pats:
            # a 3-tuple carries an extra per-line 'ignore' regex
            if len(pat) == 3:
                p, msg, ignore = pat
            else:
                p, msg = pat
                ignore = None

            # fix-up regexes for multi-line searches
            po = p
            # \s doesn't match \n
            p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
            # [^...] doesn't match newline
            p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)

            #print po, '=>', p

            pos = 0  # character offset of the start of line n in 'post'
            n = 0    # current 0-based line number; monotonic per pattern
            for m in re.finditer(p, post, re.MULTILINE):
                if prelines is None:
                    # lazily split; splitlines(True) keeps the newlines so
                    # offsets can be mapped back to line numbers
                    prelines = pre.splitlines()
                    postlines = post.splitlines(True)

                # advance n until the line containing the match start
                start = m.start()
                while n < len(postlines):
                    step = len(postlines[n])
                    if pos + step > start:
                        break
                    pos += step
                    n += 1
                l = prelines[n]

                # per-line opt-out marker (split so it doesn't trip itself)
                if "check-code" + "-ignore" in l:
                    if debug:
                        print "Skipping %s for %s:%s (check-code -ignore)" % (
                               name, f, n)
                    continue
                elif ignore and re.search(ignore, l, re.MULTILINE):
                    continue
                bd = ""
                if blame:
                    bd = 'working directory'
                    if not blamecache:
                        blamecache = getblame(f)
                    if n < len(blamecache):
                        bl, bu, br = blamecache[n]
                        # only attribute the line if annotate agrees with
                        # the on-disk content
                        if bl == l:
                            bd = '%s@%s' % (bu, br)
                errors.append((f, lineno and n + 1, l, msg, bd))
                result = False

        # sort so reports come out in file order regardless of pattern order
        errors.sort()
        for e in errors:
            logfunc(*e)
            fc += 1
            if maxerr and fc >= maxerr:
                print " (too many errors, giving up)"
                break

    return result
425 425
if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")
    parser.add_option("", "--nolineno", action="store_false",
                      dest='lineno', help="don't show line numbers")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
                        lineno=True)
    (options, args) = parser.parse_args()

    # no arguments: check every file in the current directory
    if len(args) == 0:
        check = glob.glob("*")
    else:
        check = args

    # exit status 1 if any file fails any check
    ret = 0
    for f in check:
        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                         blame=options.blame, debug=options.debug,
                         lineno=options.lineno):
            ret = 1
    sys.exit(ret)
@@ -1,1110 +1,1110 b''
1 1 # -*- coding: utf-8 -*-
2 2 # $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
3 3 # Author: Engelbert Gruber <grubert@users.sourceforge.net>
4 4 # Copyright: This module is put into the public domain.
5 5
6 6 """
7 7 Simple man page writer for reStructuredText.
8 8
9 9 Man pages (short for "manual pages") contain system documentation on unix-like
10 10 systems. The pages are grouped in numbered sections:
11 11
12 12 1 executable programs and shell commands
13 13 2 system calls
14 14 3 library functions
15 15 4 special files
16 16 5 file formats
17 17 6 games
18 18 7 miscellaneous
19 19 8 system administration
20 20
Man pages are written in *troff*, a text file formatting system.
22 22
23 23 See http://www.tldp.org/HOWTO/Man-Page for a start.
24 24
Man pages have no subsections, only parts.
26 26 Standard parts
27 27
28 28 NAME ,
29 29 SYNOPSIS ,
30 30 DESCRIPTION ,
31 31 OPTIONS ,
32 32 FILES ,
33 33 SEE ALSO ,
34 34 BUGS ,
35 35
36 36 and
37 37
38 38 AUTHOR .
39 39
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
by the command whatis or apropos.
42 42
43 43 """
44 44
45 45 __docformat__ = 'reStructuredText'
46 46
47 47 import re
48 48
49 49 from docutils import nodes, writers, languages
50 50 try:
51 51 import roman
52 52 except ImportError:
53 53 from docutils.utils import roman
54 54 import inspect
55 55
# Indentation amounts (in roff 'ens') for the various list environments.
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQOUTE_INDENT = 3.5  # NOTE: historical misspelling kept; callers use it

# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
91 91
class Writer(writers.Writer):

    # One-element tuple: the previous "('manpage')" was just a
    # parenthesized string and only matched format queries through
    # accidental substring containment.
    supported = ('manpage',)
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = Translator

    def translate(self):
        """Render self.document to troff text via the Translator visitor."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
108 108
109 109
class Table(object):
    """Accumulate table rows/cells and render them as a troff .TS/.TE block."""

    def __init__(self):
        self._rows = []
        self._options = ['center']
        self._tab_char = '\t'
        self._coldefs = []

    def new_row(self):
        self._rows.append([])

    def append_separator(self, separator):
        """Append the separator for table head."""
        self._rows.append([separator])

    def append_cell(self, cell_lines):
        """cell_lines is an array of lines"""
        # drop a leading paragraph break; it would add a blank row
        start = 1 if len(cell_lines) > 0 and cell_lines[0] == '.sp\n' else 0
        self._rows[-1].append(cell_lines[start:])
        if len(self._coldefs) < len(self._rows[-1]):
            self._coldefs.append('l')

    def _minimize_cell(self, cell_lines):
        """Remove leading and trailing blank and ``.sp`` lines"""
        blanks = ('\n', '.sp\n')
        while cell_lines and cell_lines[0] in blanks:
            del cell_lines[0]
        while cell_lines and cell_lines[-1] in blanks:
            del cell_lines[-1]

    def as_list(self):
        out = ['.TS\n']
        out.append(' '.join(self._options) + ';\n')
        out.append('|%s|.\n' % '|'.join(self._coldefs))
        for row in self._rows:
            # row = array of cells. cell = array of lines.
            out.append('_\n')  # horizontal rule above the row
            out.append('T{\n')
            for idx, cell in enumerate(row):
                self._minimize_cell(cell)
                out.extend(cell)
                if not out[-1].endswith('\n'):
                    out[-1] += '\n'
                if idx < len(row) - 1:
                    out.append('T}' + self._tab_char + 'T{\n')
                else:
                    out.append('T}\n')
            out.append('_\n')
        out.append('.TE\n')
        return out
156 156
157 157 class Translator(nodes.NodeVisitor):
158 158 """"""
159 159
160 160 words_and_spaces = re.compile(r'\S+| +|\n')
161 161 document_start = """Man page generated from reStructuredText."""
162 162
    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        # docutils' get_language() grew a second (reporter) argument in
        # later releases; support both signatures.
        arglen = len(inspect.getargspec(languages.get_language)[0])
        if arglen == 2:
            self.language = languages.get_language(lcode,
                                                   self.document.reporter)
        else:
            self.language = languages.get_language(lcode)
        self.head = []
        self.body = []
        self.foot = []
        self.section_level = 0
        self.context = []
        self.topic_class = ''
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        # the list style "*" bullet or "#" numbered
        self._list_char = []
        # writing the header .TH and .SH NAME is postboned after
        # docinfo.
        self._docinfo = {
                "title" : "", "title_upper": "",
                "subtitle" : "",
                "manual_section" : "", "manual_group" : "",
                "author" : [],
                "date" : "",
                "copyright" : "",
                "version" : "",
                    }
        self._docinfo_keys = []   # a list to keep the sequence as in source.
        self._docinfo_names = {}  # to get name from text not normalized.
        self._in_docinfo = None
        self._active_table = None
        self._in_literal = False
        self.header_written = 0
        self._line_block = 0
        self.authors = []
        self.section_level = 0
        self._indent = [0]
        # central definition of simple processing rules
        # what to output on : visit, depart
        # Do not use paragraph requests ``.PP`` because these set indentation.
        # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
        #
        # Fonts are put on a stack, the top one is used.
        # ``.ft P`` or ``\\fP`` pop from stack.
        # ``B`` bold, ``I`` italic, ``R`` roman should be available.
        # Hopefully ``C`` courier too.
        self.defs = {
                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
                'definition_list_item' : ('.TP', ''),
                'field_name' : ('.TP\n.B ', '\n'),
                'literal' : ('\\fB', '\\fP'),
                'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),

                'option_list_item' : ('.TP\n', ''),

                'reference' : (r'\%', r'\:'),
                'emphasis': ('\\fI', '\\fP'),
                'strong' : ('\\fB', '\\fP'),
                'term' : ('\n.B ', '\n'),
                'title_reference' : ('\\fI', '\\fP'),

                'topic-title' : ('.SS ',),
                'sidebar-title' : ('.SS ',),

                'problematic' : ('\n.nf\n', '\n.fi\n'),
              }
        # NOTE don't specify the newline before a dot-command, but ensure
        # it is there.
236 236
237 237 def comment_begin(self, text):
238 238 """Return commented version of the passed text WITHOUT end of
239 239 line/comment."""
240 240 prefix = '.\\" '
241 241 out_text = ''.join(
242 242 [(prefix + in_line + '\n')
243 243 for in_line in text.split('\n')])
244 244 return out_text
245 245
246 246 def comment(self, text):
247 247 """Return commented version of the passed text."""
248 248 return self.comment_begin(text)+'.\n'
249 249
250 250 def ensure_eol(self):
251 251 """Ensure the last line in body is terminated by new line."""
252 252 if self.body[-1][-1] != '\n':
253 253 self.body.append('\n')
254 254
255 255 def astext(self):
256 256 """Return the final formatted document as a string."""
257 257 if not self.header_written:
258 258 # ensure we get a ".TH" as viewers require it.
259 259 self.head.append(self.header())
260 260 # filter body
261 for i in xrange(len(self.body)-1, 0, -1):
261 for i in xrange(len(self.body) - 1, 0, -1):
262 262 # remove superfluous vertical gaps.
263 263 if self.body[i] == '.sp\n':
264 264 if self.body[i - 1][:4] in ('.BI ','.IP '):
265 265 self.body[i] = '.\n'
266 266 elif (self.body[i - 1][:3] == '.B ' and
267 267 self.body[i - 2][:4] == '.TP\n'):
268 268 self.body[i] = '.\n'
269 269 elif (self.body[i - 1] == '\n' and
270 270 self.body[i - 2][0] != '.' and
271 271 (self.body[i - 3][:7] == '.TP\n.B '
272 272 or self.body[i - 3][:4] == '\n.B ')
273 273 ):
274 274 self.body[i] = '.\n'
275 275 return ''.join(self.head + self.body + self.foot)
276 276
277 277 def deunicode(self, text):
278 278 text = text.replace(u'\xa0', '\\ ')
279 279 text = text.replace(u'\u2020', '\\(dg')
280 280 return text
281 281
282 282 def visit_Text(self, node):
283 283 text = node.astext()
284 284 text = text.replace('\\','\\e')
285 285 replace_pairs = [
286 286 (u'-', ur'\-'),
287 287 (u'\'', ur'\(aq'),
288 288 (u'Β΄', ur'\''),
289 289 (u'`', ur'\(ga'),
290 290 ]
291 291 for (in_char, out_markup) in replace_pairs:
292 292 text = text.replace(in_char, out_markup)
293 293 # unicode
294 294 text = self.deunicode(text)
295 295 if self._in_literal:
296 296 # prevent interpretation of "." at line start
297 297 if text[0] == '.':
298 298 text = '\\&' + text
299 299 text = text.replace('\n.', '\n\\&.')
300 300 self.body.append(text)
301 301
302 302 def depart_Text(self, node):
303 303 pass
304 304
    def list_start(self, node):
        # Local helper that produces the bullet/number for each item of
        # the list being opened and knows how wide the label column is.
        class enum_char(object):
            enum_style = {
                    'bullet' : '\\(bu',
                    'emdash' : '\\(em',
                     }

            def __init__(self, style):
                self._style = style
                if 'start' in node:
                    self._cnt = node['start'] - 1
                else:
                    self._cnt = 0
                self._indent = 2
                if style == 'arabic':
                    # indentation depends on number of childrens
                    # and start value.
                    self._indent = len(str(len(node.children)))
                    self._indent += len(str(self._cnt)) + 1
                elif style == 'loweralpha':
                    self._cnt += ord('a') - 1
                    self._indent = 3
                elif style == 'upperalpha':
                    self._cnt += ord('A') - 1
                    self._indent = 3
                elif style.endswith('roman'):
                    self._indent = 5

            def next(self):
                # return the marker for the next item, advancing the counter
                if self._style == 'bullet':
                    return self.enum_style[self._style]
                elif self._style == 'emdash':
                    return self.enum_style[self._style]
                self._cnt += 1
                # TODO add prefix postfix
                if self._style == 'arabic':
                    return "%d." % self._cnt
                elif self._style in ('loweralpha', 'upperalpha'):
                    return "%c." % self._cnt
                elif self._style.endswith('roman'):
                    res = roman.toRoman(self._cnt) + '.'
                    if self._style.startswith('upper'):
                        return res.upper()
                    return res.lower()
                else:
                    return "%d." % self._cnt
            def get_width(self):
                return self._indent
            def __repr__(self):
                return 'enum_style-%s' % list(self._style)

        if 'enumtype' in node:
            self._list_char.append(enum_char(node['enumtype']))
        else:
            self._list_char.append(enum_char('bullet'))
        if len(self._list_char) > 1:
            # indent nested lists
            self.indent(self._list_char[-2].get_width())
        else:
            self.indent(self._list_char[-1].get_width())
365 365
    def list_end(self):
        # close the indentation opened by list_start and pop the marker
        self.dedent()
        self._list_char.pop()

    def header(self):
        # build the .TH title line plus the NAME section from docinfo
        tmpl = (".TH %(title_upper)s %(manual_section)s"
                " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
                ".SH NAME\n"
                "%(title)s \- %(subtitle)s\n")
        return tmpl % self._docinfo

    def append_header(self):
        """append header with .TH and .SH NAME"""
        # NOTE before everything
        # .TH title_upper section date source manual
        if self.header_written:
            return
        self.body.append(self.header())
        self.body.append(MACRO_DEF)
        self.header_written = 1
386 386
    # -- docinfo fields, admonitions and block quotes -------------------

    def visit_address(self, node):
        self.visit_docinfo_item(node, 'address')

    def depart_address(self, node):
        pass

    def visit_admonition(self, node, name=None):
        # open a labelled, indented paragraph; the label is localized if
        # the language provides a translation
        if name:
            self.body.append('.IP %s\n' %
                        self.language.labels.get(name, name))

    def depart_admonition(self, node):
        self.body.append('.RE\n')

    def visit_attention(self, node):
        self.visit_admonition(node, 'attention')

    depart_attention = depart_admonition

    def visit_docinfo_item(self, node, name):
        # authors accumulate into a list; other fields are single-valued
        if name == 'author':
            self._docinfo[name].append(node.astext())
        else:
            self._docinfo[name] = node.astext()
        self._docinfo_keys.append(name)
        raise nodes.SkipNode

    def depart_docinfo_item(self, node):
        pass

    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    depart_author = depart_docinfo_item

    def visit_authors(self, node):
        # _author is called anyway.
        pass

    def depart_authors(self, node):
        pass

    def visit_block_quote(self, node):
        # BUG/HACK: indent alway uses the _last_ indention,
        # thus we need two of them.
        self.indent(BLOCKQOUTE_INDENT)
        self.indent(0)

    def depart_block_quote(self, node):
        self.dedent()
        self.dedent()
438 438
    # -- lists, citations, table column specs, comments -----------------

    def visit_bullet_list(self, node):
        self.list_start(node)

    def depart_bullet_list(self, node):
        self.list_end()

    def visit_caption(self, node):
        pass

    def depart_caption(self, node):
        pass

    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')

    depart_caution = depart_admonition

    def visit_citation(self, node):
        # render as an indented paragraph labelled "[label]"
        num, text = node.astext().split(None, 1)
        num = num.strip()
        self.body.append('.IP [%s] 5\n' % num)

    def depart_citation(self, node):
        pass

    def visit_citation_reference(self, node):
        self.body.append('['+node.astext()+']')
        raise nodes.SkipNode

    def visit_classifier(self, node):
        pass

    def depart_classifier(self, node):
        pass

    def visit_colspec(self, node):
        # collected for write_colspecs(); tables are otherwise handled
        # by the Table helper class
        self.colspecs.append(node)

    def depart_colspec(self, node):
        pass

    def write_colspecs(self):
        self.body.append("%s.\n" % ('L '*len(self.colspecs)))

    def visit_comment(self, node,
                      sub=re.compile('-(?=-)').sub):
        self.body.append(self.comment(node.astext()))
        raise nodes.SkipNode

    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')

    depart_contact = depart_docinfo_item

    def visit_container(self, node):
        pass

    def depart_container(self, node):
        pass
498 498
499 499 def visit_compound(self, node):
500 500 pass
501 501
502 502 def depart_compound(self, node):
503 503 pass
504 504
505 505 def visit_copyright(self, node):
506 506 self.visit_docinfo_item(node, 'copyright')
507 507
508 508 def visit_danger(self, node):
509 509 self.visit_admonition(node, 'danger')
510 510
511 511 depart_danger = depart_admonition
512 512
513 513 def visit_date(self, node):
514 514 self.visit_docinfo_item(node, 'date')
515 515
516 516 def visit_decoration(self, node):
517 517 pass
518 518
519 519 def depart_decoration(self, node):
520 520 pass
521 521
522 522 def visit_definition(self, node):
523 523 pass
524 524
525 525 def depart_definition(self, node):
526 526 pass
527 527
528 528 def visit_definition_list(self, node):
529 529 self.indent(DEFINITION_LIST_INDENT)
530 530
531 531 def depart_definition_list(self, node):
532 532 self.dedent()
533 533
534 534 def visit_definition_list_item(self, node):
535 535 self.body.append(self.defs['definition_list_item'][0])
536 536
537 537 def depart_definition_list_item(self, node):
538 538 self.body.append(self.defs['definition_list_item'][1])
539 539
540 540 def visit_description(self, node):
541 541 pass
542 542
543 543 def depart_description(self, node):
544 544 pass
545 545
546 546 def visit_docinfo(self, node):
547 547 self._in_docinfo = 1
548 548
549 549 def depart_docinfo(self, node):
550 550 self._in_docinfo = None
551 551 # NOTE nothing should be written before this
552 552 self.append_header()
553 553
554 554 def visit_doctest_block(self, node):
555 555 self.body.append(self.defs['literal_block'][0])
556 556 self._in_literal = True
557 557
558 558 def depart_doctest_block(self, node):
559 559 self._in_literal = False
560 560 self.body.append(self.defs['literal_block'][1])
561 561
562 562 def visit_document(self, node):
563 563 # no blank line between comment and header.
564 564 self.body.append(self.comment(self.document_start).rstrip()+'\n')
565 565 # writing header is postboned
566 566 self.header_written = 0
567 567
568 568 def depart_document(self, node):
569 569 if self._docinfo['author']:
570 570 self.body.append('.SH AUTHOR\n%s\n'
571 571 % ', '.join(self._docinfo['author']))
572 572 skip = ('author', 'copyright', 'date',
573 573 'manual_group', 'manual_section',
574 574 'subtitle',
575 575 'title', 'title_upper', 'version')
576 576 for name in self._docinfo_keys:
577 577 if name == 'address':
578 578 self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
579 579 self.language.labels.get(name, name),
580 580 self.defs['indent'][0] % 0,
581 581 self.defs['indent'][0] % BLOCKQOUTE_INDENT,
582 582 self._docinfo[name],
583 583 self.defs['indent'][1],
584 584 self.defs['indent'][1]))
585 585 elif name not in skip:
586 586 if name in self._docinfo_names:
587 587 label = self._docinfo_names[name]
588 588 else:
589 589 label = self.language.labels.get(name, name)
590 590 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
591 591 if self._docinfo['copyright']:
592 592 self.body.append('.SH COPYRIGHT\n%s\n'
593 593 % self._docinfo['copyright'])
594 594 self.body.append(self.comment(
595 595 'Generated by docutils manpage writer.\n'))
596 596
597 597 def visit_emphasis(self, node):
598 598 self.body.append(self.defs['emphasis'][0])
599 599
600 600 def depart_emphasis(self, node):
601 601 self.body.append(self.defs['emphasis'][1])
602 602
603 603 def visit_entry(self, node):
604 604 # a cell in a table row
605 605 if 'morerows' in node:
606 606 self.document.reporter.warning('"table row spanning" not supported',
607 607 base_node=node)
608 608 if 'morecols' in node:
609 609 self.document.reporter.warning(
610 610 '"table cell spanning" not supported', base_node=node)
611 611 self.context.append(len(self.body))
612 612
613 613 def depart_entry(self, node):
614 614 start = self.context.pop()
615 615 self._active_table.append_cell(self.body[start:])
616 616 del self.body[start:]
617 617
618 618 def visit_enumerated_list(self, node):
619 619 self.list_start(node)
620 620
621 621 def depart_enumerated_list(self, node):
622 622 self.list_end()
623 623
624 624 def visit_error(self, node):
625 625 self.visit_admonition(node, 'error')
626 626
627 627 depart_error = depart_admonition
628 628
629 629 def visit_field(self, node):
630 630 pass
631 631
632 632 def depart_field(self, node):
633 633 pass
634 634
635 635 def visit_field_body(self, node):
636 636 if self._in_docinfo:
637 637 name_normalized = self._field_name.lower().replace(" ","_")
638 638 self._docinfo_names[name_normalized] = self._field_name
639 639 self.visit_docinfo_item(node, name_normalized)
640 640 raise nodes.SkipNode
641 641
642 642 def depart_field_body(self, node):
643 643 pass
644 644
645 645 def visit_field_list(self, node):
646 646 self.indent(FIELD_LIST_INDENT)
647 647
648 648 def depart_field_list(self, node):
649 649 self.dedent()
650 650
651 651 def visit_field_name(self, node):
652 652 if self._in_docinfo:
653 653 self._field_name = node.astext()
654 654 raise nodes.SkipNode
655 655 else:
656 656 self.body.append(self.defs['field_name'][0])
657 657
658 658 def depart_field_name(self, node):
659 659 self.body.append(self.defs['field_name'][1])
660 660
661 661 def visit_figure(self, node):
662 662 self.indent(2.5)
663 663 self.indent(0)
664 664
665 665 def depart_figure(self, node):
666 666 self.dedent()
667 667 self.dedent()
668 668
669 669 def visit_footer(self, node):
670 670 self.document.reporter.warning('"footer" not supported',
671 671 base_node=node)
672 672
673 673 def depart_footer(self, node):
674 674 pass
675 675
676 676 def visit_footnote(self, node):
677 677 num, text = node.astext().split(None, 1)
678 678 num = num.strip()
679 679 self.body.append('.IP [%s] 5\n' % self.deunicode(num))
680 680
681 681 def depart_footnote(self, node):
682 682 pass
683 683
684 684 def footnote_backrefs(self, node):
685 685 self.document.reporter.warning('"footnote_backrefs" not supported',
686 686 base_node=node)
687 687
688 688 def visit_footnote_reference(self, node):
689 689 self.body.append('['+self.deunicode(node.astext())+']')
690 690 raise nodes.SkipNode
691 691
692 692 def depart_footnote_reference(self, node):
693 693 pass
694 694
695 695 def visit_generated(self, node):
696 696 pass
697 697
698 698 def depart_generated(self, node):
699 699 pass
700 700
701 701 def visit_header(self, node):
702 702 raise NotImplementedError, node.astext()
703 703
704 704 def depart_header(self, node):
705 705 pass
706 706
707 707 def visit_hint(self, node):
708 708 self.visit_admonition(node, 'hint')
709 709
710 710 depart_hint = depart_admonition
711 711
712 712 def visit_subscript(self, node):
713 713 self.body.append('\\s-2\\d')
714 714
715 715 def depart_subscript(self, node):
716 716 self.body.append('\\u\\s0')
717 717
718 718 def visit_superscript(self, node):
719 719 self.body.append('\\s-2\\u')
720 720
721 721 def depart_superscript(self, node):
722 722 self.body.append('\\d\\s0')
723 723
724 724 def visit_attribution(self, node):
725 725 self.body.append('\\(em ')
726 726
727 727 def depart_attribution(self, node):
728 728 self.body.append('\n')
729 729
730 730 def visit_image(self, node):
731 731 self.document.reporter.warning('"image" not supported',
732 732 base_node=node)
733 733 text = []
734 734 if 'alt' in node.attributes:
735 735 text.append(node.attributes['alt'])
736 736 if 'uri' in node.attributes:
737 737 text.append(node.attributes['uri'])
738 738 self.body.append('[image: %s]\n' % ('/'.join(text)))
739 739 raise nodes.SkipNode
740 740
741 741 def visit_important(self, node):
742 742 self.visit_admonition(node, 'important')
743 743
744 744 depart_important = depart_admonition
745 745
746 746 def visit_label(self, node):
747 747 # footnote and citation
748 748 if (isinstance(node.parent, nodes.footnote)
749 749 or isinstance(node.parent, nodes.citation)):
750 750 raise nodes.SkipNode
751 751 self.document.reporter.warning('"unsupported "label"',
752 752 base_node=node)
753 753 self.body.append('[')
754 754
755 755 def depart_label(self, node):
756 756 self.body.append(']\n')
757 757
758 758 def visit_legend(self, node):
759 759 pass
760 760
761 761 def depart_legend(self, node):
762 762 pass
763 763
764 764 # WHAT should we use .INDENT, .UNINDENT ?
765 765 def visit_line_block(self, node):
766 766 self._line_block += 1
767 767 if self._line_block == 1:
768 768 self.body.append('.sp\n')
769 769 self.body.append('.nf\n')
770 770 else:
771 771 self.body.append('.in +2\n')
772 772
773 773 def depart_line_block(self, node):
774 774 self._line_block -= 1
775 775 if self._line_block == 0:
776 776 self.body.append('.fi\n')
777 777 self.body.append('.sp\n')
778 778 else:
779 779 self.body.append('.in -2\n')
780 780
781 781 def visit_line(self, node):
782 782 pass
783 783
784 784 def depart_line(self, node):
785 785 self.body.append('\n')
786 786
787 787 def visit_list_item(self, node):
788 788 # man 7 man argues to use ".IP" instead of ".TP"
789 789 self.body.append('.IP %s %d\n' % (
790 790 self._list_char[-1].next(),
791 791 self._list_char[-1].get_width(),))
792 792
793 793 def depart_list_item(self, node):
794 794 pass
795 795
796 796 def visit_literal(self, node):
797 797 self.body.append(self.defs['literal'][0])
798 798
799 799 def depart_literal(self, node):
800 800 self.body.append(self.defs['literal'][1])
801 801
802 802 def visit_literal_block(self, node):
803 803 self.body.append(self.defs['literal_block'][0])
804 804 self._in_literal = True
805 805
806 806 def depart_literal_block(self, node):
807 807 self._in_literal = False
808 808 self.body.append(self.defs['literal_block'][1])
809 809
810 810 def visit_meta(self, node):
811 811 raise NotImplementedError, node.astext()
812 812
813 813 def depart_meta(self, node):
814 814 pass
815 815
816 816 def visit_note(self, node):
817 817 self.visit_admonition(node, 'note')
818 818
819 819 depart_note = depart_admonition
820 820
821 821 def indent(self, by=0.5):
822 822 # if we are in a section ".SH" there already is a .RS
823 823 step = self._indent[-1]
824 824 self._indent.append(by)
825 825 self.body.append(self.defs['indent'][0] % step)
826 826
827 827 def dedent(self):
828 828 self._indent.pop()
829 829 self.body.append(self.defs['indent'][1])
830 830
831 831 def visit_option_list(self, node):
832 832 self.indent(OPTION_LIST_INDENT)
833 833
834 834 def depart_option_list(self, node):
835 835 self.dedent()
836 836
837 837 def visit_option_list_item(self, node):
838 838 # one item of the list
839 839 self.body.append(self.defs['option_list_item'][0])
840 840
841 841 def depart_option_list_item(self, node):
842 842 self.body.append(self.defs['option_list_item'][1])
843 843
844 844 def visit_option_group(self, node):
845 845 # as one option could have several forms it is a group
846 846 # options without parameter bold only, .B, -v
847 847 # options with parameter bold italic, .BI, -f file
848 848 #
849 849 # we do not know if .B or .BI
850 850 self.context.append('.B') # blind guess
851 851 self.context.append(len(self.body)) # to be able to insert later
852 852 self.context.append(0) # option counter
853 853
854 854 def depart_option_group(self, node):
855 855 self.context.pop() # the counter
856 856 start_position = self.context.pop()
857 857 text = self.body[start_position:]
858 858 del self.body[start_position:]
859 859 self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
860 860
861 861 def visit_option(self, node):
862 862 # each form of the option will be presented separately
863 863 if self.context[-1] > 0:
864 864 self.body.append(', ')
865 865 if self.context[-3] == '.BI':
866 866 self.body.append('\\')
867 867 self.body.append(' ')
868 868
869 869 def depart_option(self, node):
870 870 self.context[-1] += 1
871 871
872 872 def visit_option_string(self, node):
873 873 # do not know if .B or .BI
874 874 pass
875 875
876 876 def depart_option_string(self, node):
877 877 pass
878 878
879 879 def visit_option_argument(self, node):
880 880 self.context[-3] = '.BI' # bold/italic alternate
881 881 if node['delimiter'] != ' ':
882 882 self.body.append('\\fB%s ' % node['delimiter'])
883 elif self.body[len(self.body)-1].endswith('='):
883 elif self.body[len(self.body) - 1].endswith('='):
884 884 # a blank only means no blank in output, just changing font
885 885 self.body.append(' ')
886 886 else:
887 887 # blank backslash blank, switch font then a blank
888 888 self.body.append(' \\ ')
889 889
890 890 def depart_option_argument(self, node):
891 891 pass
892 892
893 893 def visit_organization(self, node):
894 894 self.visit_docinfo_item(node, 'organization')
895 895
896 896 def depart_organization(self, node):
897 897 pass
898 898
899 899 def visit_paragraph(self, node):
900 900 # ``.PP`` : Start standard indented paragraph.
901 901 # ``.LP`` : Start block paragraph, all except the first.
902 902 # ``.P [type]`` : Start paragraph type.
903 903 # NOTE don't use paragraph starts because they reset indentation.
904 904 # ``.sp`` is only vertical space
905 905 self.ensure_eol()
906 906 self.body.append('.sp\n')
907 907
908 908 def depart_paragraph(self, node):
909 909 self.body.append('\n')
910 910
911 911 def visit_problematic(self, node):
912 912 self.body.append(self.defs['problematic'][0])
913 913
914 914 def depart_problematic(self, node):
915 915 self.body.append(self.defs['problematic'][1])
916 916
917 917 def visit_raw(self, node):
918 918 if node.get('format') == 'manpage':
919 919 self.body.append(node.astext() + "\n")
920 920 # Keep non-manpage raw text out of output:
921 921 raise nodes.SkipNode
922 922
923 923 def visit_reference(self, node):
924 924 """E.g. link or email address."""
925 925 self.body.append(self.defs['reference'][0])
926 926
927 927 def depart_reference(self, node):
928 928 self.body.append(self.defs['reference'][1])
929 929
930 930 def visit_revision(self, node):
931 931 self.visit_docinfo_item(node, 'revision')
932 932
933 933 depart_revision = depart_docinfo_item
934 934
935 935 def visit_row(self, node):
936 936 self._active_table.new_row()
937 937
938 938 def depart_row(self, node):
939 939 pass
940 940
941 941 def visit_section(self, node):
942 942 self.section_level += 1
943 943
944 944 def depart_section(self, node):
945 945 self.section_level -= 1
946 946
947 947 def visit_status(self, node):
948 948 self.visit_docinfo_item(node, 'status')
949 949
950 950 depart_status = depart_docinfo_item
951 951
952 952 def visit_strong(self, node):
953 953 self.body.append(self.defs['strong'][0])
954 954
955 955 def depart_strong(self, node):
956 956 self.body.append(self.defs['strong'][1])
957 957
958 958 def visit_substitution_definition(self, node):
959 959 """Internal only."""
960 960 raise nodes.SkipNode
961 961
962 962 def visit_substitution_reference(self, node):
963 963 self.document.reporter.warning('"substitution_reference" not supported',
964 964 base_node=node)
965 965
966 966 def visit_subtitle(self, node):
967 967 if isinstance(node.parent, nodes.sidebar):
968 968 self.body.append(self.defs['strong'][0])
969 969 elif isinstance(node.parent, nodes.document):
970 970 self.visit_docinfo_item(node, 'subtitle')
971 971 elif isinstance(node.parent, nodes.section):
972 972 self.body.append(self.defs['strong'][0])
973 973
974 974 def depart_subtitle(self, node):
975 975 # document subtitle calls SkipNode
976 976 self.body.append(self.defs['strong'][1]+'\n.PP\n')
977 977
978 978 def visit_system_message(self, node):
979 979 # TODO add report_level
980 980 #if node['level'] < self.document.reporter['writer'].report_level:
981 981 # Level is too low to display:
982 982 # raise nodes.SkipNode
983 983 attr = {}
984 984 backref_text = ''
985 985 if node.hasattr('id'):
986 986 attr['name'] = node['id']
987 987 if node.hasattr('line'):
988 988 line = ', line %s' % node['line']
989 989 else:
990 990 line = ''
991 991 self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
992 992 % (node['type'], node['level'], node['source'], line))
993 993
994 994 def depart_system_message(self, node):
995 995 pass
996 996
997 997 def visit_table(self, node):
998 998 self._active_table = Table()
999 999
1000 1000 def depart_table(self, node):
1001 1001 self.ensure_eol()
1002 1002 self.body.extend(self._active_table.as_list())
1003 1003 self._active_table = None
1004 1004
1005 1005 def visit_target(self, node):
1006 1006 # targets are in-document hyper targets, without any use for man-pages.
1007 1007 raise nodes.SkipNode
1008 1008
1009 1009 def visit_tbody(self, node):
1010 1010 pass
1011 1011
1012 1012 def depart_tbody(self, node):
1013 1013 pass
1014 1014
1015 1015 def visit_term(self, node):
1016 1016 self.body.append(self.defs['term'][0])
1017 1017
1018 1018 def depart_term(self, node):
1019 1019 self.body.append(self.defs['term'][1])
1020 1020
1021 1021 def visit_tgroup(self, node):
1022 1022 pass
1023 1023
1024 1024 def depart_tgroup(self, node):
1025 1025 pass
1026 1026
1027 1027 def visit_thead(self, node):
1028 1028 # MAYBE double line '='
1029 1029 pass
1030 1030
1031 1031 def depart_thead(self, node):
1032 1032 # MAYBE double line '='
1033 1033 pass
1034 1034
1035 1035 def visit_tip(self, node):
1036 1036 self.visit_admonition(node, 'tip')
1037 1037
1038 1038 depart_tip = depart_admonition
1039 1039
1040 1040 def visit_title(self, node):
1041 1041 if isinstance(node.parent, nodes.topic):
1042 1042 self.body.append(self.defs['topic-title'][0])
1043 1043 elif isinstance(node.parent, nodes.sidebar):
1044 1044 self.body.append(self.defs['sidebar-title'][0])
1045 1045 elif isinstance(node.parent, nodes.admonition):
1046 1046 self.body.append('.IP "')
1047 1047 elif self.section_level == 0:
1048 1048 self._docinfo['title'] = node.astext()
1049 1049 # document title for .TH
1050 1050 self._docinfo['title_upper'] = node.astext().upper()
1051 1051 raise nodes.SkipNode
1052 1052 elif self.section_level == 1:
1053 1053 self.body.append('.SH ')
1054 1054 for n in node.traverse(nodes.Text):
1055 1055 n.parent.replace(n, nodes.Text(n.astext().upper()))
1056 1056 else:
1057 1057 self.body.append('.SS ')
1058 1058
1059 1059 def depart_title(self, node):
1060 1060 if isinstance(node.parent, nodes.admonition):
1061 1061 self.body.append('"')
1062 1062 self.body.append('\n')
1063 1063
1064 1064 def visit_title_reference(self, node):
1065 1065 """inline citation reference"""
1066 1066 self.body.append(self.defs['title_reference'][0])
1067 1067
1068 1068 def depart_title_reference(self, node):
1069 1069 self.body.append(self.defs['title_reference'][1])
1070 1070
1071 1071 def visit_topic(self, node):
1072 1072 pass
1073 1073
1074 1074 def depart_topic(self, node):
1075 1075 pass
1076 1076
1077 1077 def visit_sidebar(self, node):
1078 1078 pass
1079 1079
1080 1080 def depart_sidebar(self, node):
1081 1081 pass
1082 1082
1083 1083 def visit_rubric(self, node):
1084 1084 pass
1085 1085
1086 1086 def depart_rubric(self, node):
1087 1087 pass
1088 1088
1089 1089 def visit_transition(self, node):
1090 1090 # .PP Begin a new paragraph and reset prevailing indent.
1091 1091 # .sp N leaves N lines of blank space.
1092 1092 # .ce centers the next line
1093 1093 self.body.append('\n.sp\n.ce\n----\n')
1094 1094
1095 1095 def depart_transition(self, node):
1096 1096 self.body.append('\n.ce 0\n.sp\n')
1097 1097
1098 1098 def visit_version(self, node):
1099 1099 self.visit_docinfo_item(node, 'version')
1100 1100
1101 1101 def visit_warning(self, node):
1102 1102 self.visit_admonition(node, 'warning')
1103 1103
1104 1104 depart_warning = depart_admonition
1105 1105
1106 1106 def unimplemented_visit(self, node):
1107 1107 raise NotImplementedError('visiting unimplemented node type: %s'
1108 1108 % node.__class__.__name__)
1109 1109
1110 1110 # vim: set fileencoding=utf-8 et ts=4 ai :
@@ -1,61 +1,61 b''
1 1 # highlight.py - highlight extension implementation file
2 2 #
3 3 # Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # The original module was split in an interface and an implementation
9 9 # file to defer pygments loading and speedup extension setup.
10 10
11 11 from mercurial import demandimport
12 12 demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__'])
13 13 from mercurial import util, encoding
14 14
15 15 from pygments import highlight
16 16 from pygments.util import ClassNotFound
17 17 from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
18 18 from pygments.formatters import HtmlFormatter
19 19
20 20 SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
21 21 'type="text/css" />')
22 22
23 23 def pygmentize(field, fctx, style, tmpl):
24 24
25 25 # append a <link ...> to the syntax highlighting css
26 26 old_header = tmpl.load('header')
27 27 if SYNTAX_CSS not in old_header:
28 28 new_header = old_header + SYNTAX_CSS
29 29 tmpl.cache['header'] = new_header
30 30
31 31 text = fctx.data()
32 32 if util.binary(text):
33 33 return
34 34
35 35 # Pygments is best used with Unicode strings:
36 36 # <http://pygments.org/docs/unicode/>
37 37 text = text.decode(encoding.encoding, 'replace')
38 38
39 39 # To get multi-line strings right, we can't format line-by-line
40 40 try:
41 41 lexer = guess_lexer_for_filename(fctx.path(), text[:1024])
42 42 except (ClassNotFound, ValueError):
43 43 try:
44 44 lexer = guess_lexer(text[:1024])
45 45 except (ClassNotFound, ValueError):
46 46 lexer = TextLexer()
47 47
48 48 formatter = HtmlFormatter(style=style)
49 49
50 50 colorized = highlight(text, lexer, formatter)
51 51 # strip wrapping div
52 52 colorized = colorized[:colorized.find('\n</pre>')]
53 colorized = colorized[colorized.find('<pre>')+5:]
53 colorized = colorized[colorized.find('<pre>') + 5:]
54 54 coloriter = (s.encode(encoding.encoding, 'replace')
55 55 for s in colorized.splitlines())
56 56
57 57 tmpl.filters['colorize'] = lambda x: coloriter.next()
58 58
59 59 oldl = tmpl.cache[field]
60 60 newl = oldl.replace('line|escape', 'line|colorize')
61 61 tmpl.cache[field] = newl
@@ -1,335 +1,335 b''
1 1 # watcher.py - high-level interfaces to the Linux inotify subsystem
2 2
3 3 # Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
4 4
5 5 # This library is free software; you can redistribute it and/or modify
6 6 # it under the terms of version 2.1 of the GNU Lesser General Public
7 7 # License, or any later version.
8 8
9 9 '''High-level interfaces to the Linux inotify subsystem.
10 10
11 11 The inotify subsystem provides an efficient mechanism for file status
12 12 monitoring and change notification.
13 13
14 14 The watcher class hides the low-level details of the inotify
15 15 interface, and provides a Pythonic wrapper around it. It generates
16 16 events that provide somewhat more information than raw inotify makes
17 17 available.
18 18
19 19 The autowatcher class is more useful, as it automatically watches
20 20 newly-created directories on your behalf.'''
21 21
22 22 __author__ = "Bryan O'Sullivan <bos@serpentine.com>"
23 23
24 24 import _inotify as inotify
25 25 import array
26 26 import errno
27 27 import fcntl
28 28 import os
29 29 import termios
30 30
31 31
32 32 class event(object):
33 33 '''Derived inotify event class.
34 34
35 35 The following fields are available:
36 36
37 37 mask: event mask, indicating what kind of event this is
38 38
39 39 cookie: rename cookie, if a rename-related event
40 40
41 41 path: path of the directory in which the event occurred
42 42
43 43 name: name of the directory entry to which the event occurred
44 44 (may be None if the event happened to a watched directory)
45 45
46 46 fullpath: complete path at which the event occurred
47 47
48 48 wd: watch descriptor that triggered this event'''
49 49
50 50 __slots__ = (
51 51 'cookie',
52 52 'fullpath',
53 53 'mask',
54 54 'name',
55 55 'path',
56 56 'raw',
57 57 'wd',
58 58 )
59 59
60 60 def __init__(self, raw, path):
61 61 self.path = path
62 62 self.raw = raw
63 63 if raw.name:
64 64 self.fullpath = path + '/' + raw.name
65 65 else:
66 66 self.fullpath = path
67 67
68 68 self.wd = raw.wd
69 69 self.mask = raw.mask
70 70 self.cookie = raw.cookie
71 71 self.name = raw.name
72 72
73 73 def __repr__(self):
74 74 r = repr(self.raw)
75 return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
75 return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
76 76
77 77
78 78 _event_props = {
79 79 'access': 'File was accessed',
80 80 'modify': 'File was modified',
81 81 'attrib': 'Attribute of a directory entry was changed',
82 82 'close_write': 'File was closed after being written to',
83 83 'close_nowrite': 'File was closed without being written to',
84 84 'open': 'File was opened',
85 85 'moved_from': 'Directory entry was renamed from this name',
86 86 'moved_to': 'Directory entry was renamed to this name',
87 87 'create': 'Directory entry was created',
88 88 'delete': 'Directory entry was deleted',
89 89 'delete_self': 'The watched directory entry was deleted',
90 90 'move_self': 'The watched directory entry was renamed',
91 91 'unmount': 'Directory was unmounted, and can no longer be watched',
92 92 'q_overflow': 'Kernel dropped events due to queue overflow',
93 93 'ignored': 'Directory entry is no longer being watched',
94 94 'isdir': 'Event occurred on a directory',
95 95 }
96 96
97 97 for k, v in _event_props.iteritems():
98 98 mask = getattr(inotify, 'IN_' + k.upper())
99 99 def getter(self):
100 100 return self.mask & mask
101 101 getter.__name__ = k
102 102 getter.__doc__ = v
103 103 setattr(event, k, property(getter, doc=v))
104 104
105 105 del _event_props
106 106
107 107
108 108 class watcher(object):
109 109 '''Provide a Pythonic interface to the low-level inotify API.
110 110
111 111 Also adds derived information to each event that is not available
112 112 through the normal inotify API, such as directory name.'''
113 113
114 114 __slots__ = (
115 115 'fd',
116 116 '_paths',
117 117 '_wds',
118 118 )
119 119
120 120 def __init__(self):
121 121 '''Create a new inotify instance.'''
122 122
123 123 self.fd = inotify.init()
124 124 self._paths = {}
125 125 self._wds = {}
126 126
127 127 def fileno(self):
128 128 '''Return the file descriptor this watcher uses.
129 129
130 130 Useful for passing to select and poll.'''
131 131
132 132 return self.fd
133 133
134 134 def add(self, path, mask):
135 135 '''Add or modify a watch.
136 136
137 137 Return the watch descriptor added or modified.'''
138 138
139 139 path = os.path.normpath(path)
140 140 wd = inotify.add_watch(self.fd, path, mask)
141 141 self._paths[path] = wd, mask
142 142 self._wds[wd] = path, mask
143 143 return wd
144 144
145 145 def remove(self, wd):
146 146 '''Remove the given watch.'''
147 147
148 148 inotify.remove_watch(self.fd, wd)
149 149 self._remove(wd)
150 150
151 151 def _remove(self, wd):
152 152 path_mask = self._wds.pop(wd, None)
153 153 if path_mask is not None:
154 154 self._paths.pop(path_mask[0])
155 155
156 156 def path(self, path):
157 157 '''Return a (watch descriptor, event mask) pair for the given path.
158 158
159 159 If the path is not being watched, return None.'''
160 160
161 161 return self._paths.get(path)
162 162
163 163 def wd(self, wd):
164 164 '''Return a (path, event mask) pair for the given watch descriptor.
165 165
166 166 If the watch descriptor is not valid or not associated with
167 167 this watcher, return None.'''
168 168
169 169 return self._wds.get(wd)
170 170
171 171 def read(self, bufsize=None):
172 172 '''Read a list of queued inotify events.
173 173
174 174 If bufsize is zero, only return those events that can be read
175 175 immediately without blocking. Otherwise, block until events are
176 176 available.'''
177 177
178 178 events = []
179 179 for evt in inotify.read(self.fd, bufsize):
180 180 events.append(event(evt, self._wds[evt.wd][0]))
181 181 if evt.mask & inotify.IN_IGNORED:
182 182 self._remove(evt.wd)
183 183 elif evt.mask & inotify.IN_UNMOUNT:
184 184 self.close()
185 185 return events
186 186
187 187 def close(self):
188 188 '''Shut down this watcher.
189 189
190 190 All subsequent method calls are likely to raise exceptions.'''
191 191
192 192 os.close(self.fd)
193 193 self.fd = None
194 194 self._paths = None
195 195 self._wds = None
196 196
197 197 def __len__(self):
198 198 '''Return the number of active watches.'''
199 199
200 200 return len(self._paths)
201 201
202 202 def __iter__(self):
203 203 '''Yield a (path, watch descriptor, event mask) tuple for each
204 204 entry being watched.'''
205 205
206 206 for path, (wd, mask) in self._paths.iteritems():
207 207 yield path, wd, mask
208 208
209 209 def __del__(self):
210 210 if self.fd is not None:
211 211 os.close(self.fd)
212 212
213 213 ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
214 214
215 215 def add_iter(self, path, mask, onerror=None):
216 216 '''Add or modify watches over path and its subdirectories.
217 217
218 218 Yield each added or modified watch descriptor.
219 219
220 220 To ensure that this method runs to completion, you must
221 221 iterate over all of its results, even if you do not care what
222 222 they are. For example:
223 223
224 224 for wd in w.add_iter(path, mask):
225 225 pass
226 226
227 227 By default, errors are ignored. If optional arg "onerror" is
228 228 specified, it should be a function; it will be called with one
229 229 argument, an OSError instance. It can report the error to
230 230 continue with the walk, or raise the exception to abort the
231 231 walk.'''
232 232
233 233 # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
234 234 # race when adding a subdirectory. In the time between the
235 235 # event being queued by the kernel and us processing it, the
236 236 # directory may have been deleted, or replaced with a different
237 237 # kind of entry with the same name.
238 238
239 239 submask = mask | inotify.IN_ONLYDIR
240 240
241 241 try:
242 242 yield self.add(path, mask)
243 243 except OSError, err:
244 244 if onerror and err.errno not in self.ignored_errors:
245 245 onerror(err)
246 246 for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
247 247 for d in dirs:
248 248 try:
249 249 yield self.add(root + '/' + d, submask)
250 250 except OSError, err:
251 251 if onerror and err.errno not in self.ignored_errors:
252 252 onerror(err)
253 253
254 254 def add_all(self, path, mask, onerror=None):
255 255 '''Add or modify watches over path and its subdirectories.
256 256
257 257 Return a list of added or modified watch descriptors.
258 258
259 259 By default, errors are ignored. If optional arg "onerror" is
260 260 specified, it should be a function; it will be called with one
261 261 argument, an OSError instance. It can report the error to
262 262 continue with the walk, or raise the exception to abort the
263 263 walk.'''
264 264
265 265 return [w for w in self.add_iter(path, mask, onerror)]
266 266
267 267
268 268 class autowatcher(watcher):
269 269 '''watcher class that automatically watches newly created directories.'''
270 270
271 271 __slots__ = (
272 272 'addfilter',
273 273 )
274 274
275 275 def __init__(self, addfilter=None):
276 276 '''Create a new inotify instance.
277 277
278 278 This instance will automatically watch newly created
279 279 directories.
280 280
281 281 If the optional addfilter parameter is not None, it must be a
282 282 callable that takes one parameter. It will be called each time
283 283 a directory is about to be automatically watched. If it returns
284 284 True, the directory will be watched if it still exists,
285 285 otherwise, it will be skipped.'''
286 286
287 287 super(autowatcher, self).__init__()
288 288 self.addfilter = addfilter
289 289
290 290 _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
291 291
292 292 def read(self, bufsize=None):
293 293 events = super(autowatcher, self).read(bufsize)
294 294 for evt in events:
295 295 if evt.mask & self._dir_create_mask == self._dir_create_mask:
296 296 if self.addfilter is None or self.addfilter(evt):
297 297 parentmask = self._wds[evt.wd][1]
298 298 # See note about race avoidance via IN_ONLYDIR above.
299 299 mask = parentmask | inotify.IN_ONLYDIR
300 300 try:
301 301 self.add_all(evt.fullpath, mask)
302 302 except OSError, err:
303 303 if err.errno not in self.ignored_errors:
304 304 raise
305 305 return events
306 306
307 307
308 308 class threshold(object):
309 309 '''Class that indicates whether a file descriptor has reached a
310 310 threshold of readable bytes available.
311 311
312 312 This class is not thread-safe.'''
313 313
314 314 __slots__ = (
315 315 'fd',
316 316 'threshold',
317 317 '_iocbuf',
318 318 )
319 319
320 320 def __init__(self, fd, threshold=1024):
321 321 self.fd = fd
322 322 self.threshold = threshold
323 323 self._iocbuf = array.array('i', [0])
324 324
325 325 def readable(self):
326 326 '''Return the number of bytes readable on this file descriptor.'''
327 327
328 328 fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
329 329 return self._iocbuf[0]
330 330
331 331 def __call__(self):
332 332 '''Indicate whether the number of readable bytes has met or
333 333 exceeded the threshold.'''
334 334
335 335 return self.readable() >= self.threshold
@@ -1,3622 +1,3622 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60 '''
61 61
62 62 from mercurial.i18n import _
63 63 from mercurial.node import bin, hex, short, nullid, nullrev
64 64 from mercurial.lock import release
65 65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
66 66 from mercurial import repair, extensions, error, phases
67 67 from mercurial import patch as patchmod
68 68 import os, re, errno, shutil
69 69
# qclone operates before any local repository exists
commands.norepo += " qclone"

# option shared by every command that prints series information
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = cmdutil.command(cmdtable)
# mark the extension as distributed with Mercurial itself
testedwith = 'internal'

# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
81 81
class statusentry(object):
    '''One applied patch, as recorded in the mq status file.

    Pairs the binary changeset node with the patch name; rendered as
    "<hex-node>:<name>", which is the on-disk status file line format.'''

    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __repr__(self):
        return hex(self.node) + ':' + self.name
87 87
class patchheader(object):
    '''Parsed header of a single mq patch file.

    Splits a patch file into commit message, raw comment lines and
    metadata (user, date, parent, branch, node id), recognizing both hg
    export headers ("# HG changeset patch") and mail-style "From:" /
    "Date:" / "Subject:" tags, and records where the diff starts.'''
    def __init__(self, pf, plainmode=False):
        # drop trailing diff fragments from a list of header lines
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        # drop trailing blank lines from a list of header lines
        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        # 0: no diff seen yet, 1: saw "--- ", 2: diff definitely started
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip()
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        # update an existing user header, or insert one in the right style
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # update an existing date header, or insert one in the right style
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # only hg-style headers carry a parent; silently skip otherwise
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        # replace the whole commit message, keeping other comment fields
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    if phase is None:
        # honor mq.secret: new mq changesets default to the secret phase
        if repo.ui.configbool('mq', 'secret', False):
            phase = phases.secret
    if phase is not None:
        # phases.new-commit is overridden below; restored in the finally
        backup = repo.ui.backupconfig('phases', 'new-commit')
    # Marking the repository as committing an mq patch can be used
    # to optimize operations like _branchtags().
    repo._committingpatch = True
    try:
        if phase is not None:
            repo.ui.setconfig('phases', 'new-commit', phase)
        return repo.commit(*args, **kwargs)
    finally:
        repo._committingpatch = False
        if phase is not None:
            repo.ui.restoreconfig(backup)
295 295
class AbortNoCleanup(error.Abort):
    # raised during _apply (keepchanges mode) to abort a push while
    # committing, rather than rolling back, the transaction
    pass
298 298
299 299 class queue(object):
    def __init__(self, ui, path, patchdir=None):
        '''Initialize a patch queue rooted at path (the .hg directory).

        patchdir, if given, overrides the patch directory; otherwise the
        active queue is read from .hg/patches.queue ("patches" by
        default, "patches-<name>" for a named queue).'''
        self.basepath = path
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            # no patches.queue file: fall back to the default queue
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = scmutil.opener(self.path)
        self.ui = ui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                raise error.ConfigError
            self.gitmode = gitmode and 'yes' or 'no'
        except error.ConfigError:
            # not a plain bool: keep the raw auto/keep/yes/no string
            self.gitmode = ui.config('mq', 'git', 'auto').lower()
        self.plainmode = ui.configbool('mq', 'plain', False)
332 332
333 333 @util.propertycache
334 334 def applied(self):
335 335 def parselines(lines):
336 336 for l in lines:
337 337 entry = l.split(':', 1)
338 338 if len(entry) > 1:
339 339 n, name = entry
340 340 yield statusentry(bin(n), name)
341 341 elif l.strip():
342 342 self.ui.warn(_('malformated mq status line: %s\n') % entry)
343 343 # else we ignore empty lines
344 344 try:
345 345 lines = self.opener.read(self.statuspath).splitlines()
346 346 return list(parselines(lines))
347 347 except IOError, e:
348 348 if e.errno == errno.ENOENT:
349 349 return []
350 350 raise
351 351
    @util.propertycache
    def fullseries(self):
        '''Raw lines of the series file, comments and guards included.

        Returns [] when the series file does not exist.'''
        try:
            return self.opener.read(self.seriespath).splitlines()
        except IOError, e:
            if e.errno == errno.ENOENT:
                return []
            raise
360 360
    @util.propertycache
    def series(self):
        '''Patch names from the series file, comments stripped.'''
        # parseseries assigns self.series in the instance __dict__, which
        # is what this propertycache then caches and returns
        self.parseseries()
        return self.series

    @util.propertycache
    def seriesguards(self):
        '''Per-patch guard lists, parallel to self.series.'''
        # same __dict__-assignment trick as the series property above
        self.parseseries()
        return self.seriesguards
370 370
371 371 def invalidate(self):
372 372 for a in 'applied fullseries series seriesguards'.split():
373 373 if a in self.__dict__:
374 374 delattr(self, a)
375 375 self.applieddirty = False
376 376 self.seriesdirty = False
377 377 self.guardsdirty = False
378 378 self.activeguards = None
379 379
380 380 def diffopts(self, opts={}, patchfn=None):
381 381 diffopts = patchmod.diffopts(self.ui, opts)
382 382 if self.gitmode == 'auto':
383 383 diffopts.upgrade = True
384 384 elif self.gitmode == 'keep':
385 385 pass
386 386 elif self.gitmode in ('yes', 'no'):
387 387 diffopts.git = self.gitmode == 'yes'
388 388 else:
389 389 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
390 390 ' got %s') % self.gitmode)
391 391 if patchfn:
392 392 diffopts = self.patchopts(diffopts, patchfn)
393 393 return diffopts
394 394
395 395 def patchopts(self, diffopts, *patches):
396 396 """Return a copy of input diff options with git set to true if
397 397 referenced patch is a git patch and should be preserved as such.
398 398 """
399 399 diffopts = diffopts.copy()
400 400 if not diffopts.git and self.gitmode == 'keep':
401 401 for patchfn in patches:
402 402 patchf = self.opener(patchfn, 'r')
403 403 # if the patch was a git patch, refresh it as a git patch
404 404 for line in patchf:
405 405 if line.startswith('diff --git'):
406 406 diffopts.git = True
407 407 break
408 408 patchf.close()
409 409 return diffopts
410 410
    def join(self, *p):
        # join path components under the queue directory
        return os.path.join(self.path, *p)
413 413
414 414 def findseries(self, patch):
415 415 def matchpatch(l):
416 416 l = l.split('#', 1)[0]
417 417 return l.strip() == patch
418 418 for index, l in enumerate(self.fullseries):
419 419 if matchpatch(l):
420 420 return index
421 421 return None
422 422
    # "#+name" / "#-name" guard annotations on a series line, optionally
    # preceded by one whitespace; group 1 captures the signed guard name
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        '''Populate self.series and self.seriesguards from fullseries.

        Strips comments, skips blank and comment-only lines, collects the
        guards of each patch, and aborts on duplicate patch names.'''
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # comment-only line
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.seriespath)))
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))
445 445
446 446 def checkguard(self, guard):
447 447 if not guard:
448 448 return _('guard cannot be an empty string')
449 449 bad_chars = '# \t\r\n\f'
450 450 first = guard[0]
451 451 if first in '-+':
452 452 return (_('guard %r starts with invalid character: %r') %
453 453 (guard, first))
454 454 for c in bad_chars:
455 455 if c in guard:
456 456 return _('invalid character in guard %r: %r') % (guard, c)
457 457
458 458 def setactive(self, guards):
459 459 for guard in guards:
460 460 bad = self.checkguard(guard)
461 461 if bad:
462 462 raise util.Abort(bad)
463 463 guards = sorted(set(guards))
464 464 self.ui.debug('active guards: %s\n' % ' '.join(guards))
465 465 self.activeguards = guards
466 466 self.guardsdirty = True
467 467
    def active(self):
        '''Return the list of currently active guard names.

        Loaded lazily from the guards file; a missing file means no
        guards, and invalid entries are warned about and skipped.'''
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards
485 485
    def setguards(self, idx, guards):
        '''Replace the guards of series entry idx with the given list.

        Each guard must be "+name" or "-name"; aborts on invalid input.'''
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the old guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.fullseries[idx])
        self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True
499 499
    def pushable(self, idx):
        '''Decide whether the patch at idx (index or name) may be pushed.

        Returns (pushable, reason).  A patch is blocked by an active
        negative guard, or by carrying positive guards none of which is
        active; reason names the deciding guard(s).'''
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            # unguarded patches are always pushable
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, repr(exactneg[0])
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, repr(exactpos[0])
            return False, ' '.join(map(repr, pos))
        return True, ''
517 517
    def explainpushable(self, idx, all_patches=False):
        '''Tell the user why the patch at idx is allowed or skipped.

        With all_patches the explanation goes to normal output and covers
        allowed patches too; otherwise only skips are reported, and only
        in verbose mode.'''
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
542 542
    def savedirty(self):
        '''Write modified queue state (status, series, guards) back to
        disk and register newly added patch files with the queue repo.'''
        def writelist(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(str, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                # only add files not already tracked by the queue repo
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
563 563
    def removeundo(self, repo):
        '''Delete the repository's undo file, if present.

        mq rewrites history, so a stale rollback target would be unsafe;
        failure to remove only produces a warning.'''
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))
572 572
    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                self.ui.note(_('saving current version of %s as %s\n') %
                             (f, f + '.orig'))
                # copy=True keeps the file in place for patching on top;
                # otherwise it is moved aside
                if copy:
                    util.copyfile(absf, absf + '.orig')
                else:
                    util.rename(absf, absf + '.orig')
584 584
585 585 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
586 586 fp=None, changes=None, opts={}):
587 587 stat = opts.get('stat')
588 588 m = scmutil.match(repo[node1], files, opts)
589 589 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
590 590 changes, stat, fp)
591 591
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        '''Bring one patch over from mergeq, merging when a plain apply
        fails.

        Returns (err, node); after a successful merge the patch file is
        rewritten from the merged result.  Raises util.Abort on
        unrecoverable failure.'''
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, [n], update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise util.Abort(_("unable to read %s") % patch)

        # regenerate the patch file from the merged result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
630 630
    def qparents(self, repo, rev=None):
        '''Return the parent node relevant to the patch queue.

        Without rev: the working directory's first parent, or for a merge
        state the last applied patch's node (None when none applied).
        With rev: its second parent when that is an applied patch,
        otherwise the first parent.'''
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1
643 643
    def mergepatch(self, repo, mergeq, series, diffopts):
        '''Pull patches in series from mergeq into this queue, merging
        when needed.  Returns (err, head) like mergeone; stops at the
        first failure.'''
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
682 682
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file

        Returns (success, touched files, fuzz).'''
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            # report failure but still return the files touched so far
            return (False, list(files), False)
697 697
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        '''Apply patches under locks and a "qpush" transaction; see
        _apply for the parameters and return value.

        AbortNoCleanup commits the transaction and returns failure (2);
        any other exception aborts the transaction and invalidates all
        cached state before re-raising.'''
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                # deliberate: keep what was applied instead of rolling back
                tr.close()
                self.savedirty()
                return 2, repo.dirstate.p1()
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
728 728
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # back up only files this patch touches that are in
                    # the requested backup set
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("local changes found, refresh first"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                # invert: patcherr becomes True on failure
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.setparents(p1, merge)

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo['tip']
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo['tip'] == oldtip:
                raise util.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise util.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
823 823
    def _cleanup(self, patches, numrevs, keep=False):
        '''Drop patches from series/status (and disk, unless keep).

        numrevs applied entries are popped from the status list; returns
        the nodes of those finished patches.  Aborts when a patch name is
        unknown and cannot be tied to a finished revision.'''
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                os.unlink(self.join(p))

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # delete from fullseries back-to-front so indexes remain valid
        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise util.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
860 860
    def _revpatches(self, repo, revs):
        '''Map applied revisions to their patch names.

        revs must line up with the oldest applied patches in order;
        aborts when a revision is not managed by mq or sits above other
        applied patches.'''
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    # still carrying a default message: warn the user
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
884 884
    def finish(self, repo, revs):
        '''Move the applied patches in revs into regular repository
        history, advancing their phase when mq.secret is set.'''
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool('mq', 'secret', False):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = repo.ui.config('phases', 'new-commit', phases.draft)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                phases.advanceboundary(repo, tphase, qfinished)
897 897
    def delete(self, repo, patches, opts):
        '''Remove patches, given by name and/or opts['rev'], from the
        series.

        Applied patches cannot be deleted by name; revisions must map to
        applied patches and are finalized.  With opts['keep'] the patch
        files stay on disk.'''
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = scmutil.revrange(repo, opts.get('rev'))
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))
926 926
def checktoppatch(self, repo):
    """Return (node, name) of the topmost applied patch, or (None, None).

    Aborts when the working directory is not a child of qtip.
    """
    if not self.applied:
        return None, None
    topentry = self.applied[-1]
    if topentry.node not in repo.dirstate.parents():
        raise util.Abort(_("working directory revision is not qtip"))
    return topentry.node, topentry.name
936 936
def checksubstate(self, repo, baserev=None):
    '''return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes.'''
    wctx = repo[None]
    if baserev:
        bctx = repo[baserev]
    else:
        bctx = wctx.parents()[0]
    inclsubs = []
    for s in wctx.substate:
        if wctx.sub(s).dirty(True):
            # dirty(True) also counts missing/uncommitted subrepo state
            raise util.Abort(
                _("uncommitted changes in subrepository %s") % s)
        if s not in bctx.substate or bctx.sub(s).dirty():
            inclsubs.append(s)
    return inclsubs
953 953
def putsubstate2changes(self, substatestate, changes):
    """File '.hgsubstate' into the right column of a status ``changes``
    list (modified/added/removed) based on its dirstate code."""
    for files in changes[:3]:
        if '.hgsubstate' in files:
            return  # already listed up
    # not yet listed up
    if substatestate in 'a?':
        changes[1].append('.hgsubstate')    # added (or unknown)
    elif substatestate in 'r':
        changes[2].append('.hgsubstate')    # removed
    else:
        changes[0].append('.hgsubstate')    # modified
965 965
def localchangesfound(self, refresh=True):
    """Abort because of uncommitted local changes; mention qrefresh
    as the remedy when *refresh* is true."""
    if refresh:
        raise util.Abort(_("local changes found, refresh first"))
    raise util.Abort(_("local changes found"))
971 971
def checklocalchanges(self, repo, force=False, refresh=True):
    """Return (modified, added, removed, deleted) for the working dir,
    aborting via localchangesfound() unless *force* is set."""
    m, a, r, d = repo.status()[:4]
    if not force and (m or a or r or d):
        self.localchangesfound(refresh)
    return m, a, r, d
977 977
# names that can never be used for a patch
_reserved = ('series', 'status', 'guards', '.', '..')
def checkreservedname(self, name):
    """Abort if *name* is not usable as a patch name (reserved word,
    reserved prefix, or forbidden character)."""
    if name in self._reserved:
        raise util.Abort(_('"%s" cannot be used as the name of a patch')
                         % name)
    for prefix in ('.hg', '.mq'):
        if name.startswith(prefix):
            raise util.Abort(_('patch name cannot begin with "%s"')
                             % prefix)
    for c in ('#', ':'):
        if c in name:
            raise util.Abort(_('"%s" cannot be used in the name of a patch')
                             % c)
991 991
def checkpatchname(self, name, force=False):
    """Validate a new patch name; unless *force*, refuse names that
    already exist on disk (directories are always refused)."""
    self.checkreservedname(name)
    if force or not os.path.exists(self.join(name)):
        return
    if os.path.isdir(self.join(name)):
        raise util.Abort(_('"%s" already exists as a directory')
                         % name)
    raise util.Abort(_('patch "%s" already exists') % name)
1000 1000
def checkkeepchanges(self, keepchanges, force):
    # --force and --keep-changes are mutually exclusive
    if keepchanges and force:
        raise util.Abort(_('cannot use both --force and --keep-changes'))
1004 1004
1005 1005 def new(self, repo, patchfn, *pats, **opts):
1006 1006 """options:
1007 1007 msg: a string or a no-argument function returning a string
1008 1008 """
1009 1009 msg = opts.get('msg')
1010 1010 user = opts.get('user')
1011 1011 date = opts.get('date')
1012 1012 if date:
1013 1013 date = util.parsedate(date)
1014 1014 diffopts = self.diffopts({'git': opts.get('git')})
1015 1015 if opts.get('checkname', True):
1016 1016 self.checkpatchname(patchfn)
1017 1017 inclsubs = self.checksubstate(repo)
1018 1018 if inclsubs:
1019 1019 inclsubs.append('.hgsubstate')
1020 1020 substatestate = repo.dirstate['.hgsubstate']
1021 1021 if opts.get('include') or opts.get('exclude') or pats:
1022 1022 if inclsubs:
1023 1023 pats = list(pats or []) + inclsubs
1024 1024 match = scmutil.match(repo[None], pats, opts)
1025 1025 # detect missing files in pats
1026 1026 def badfn(f, msg):
1027 1027 if f != '.hgsubstate': # .hgsubstate is auto-created
1028 1028 raise util.Abort('%s: %s' % (f, msg))
1029 1029 match.bad = badfn
1030 1030 changes = repo.status(match=match)
1031 1031 m, a, r, d = changes[:4]
1032 1032 else:
1033 1033 changes = self.checklocalchanges(repo, force=True)
1034 1034 m, a, r, d = changes
1035 1035 match = scmutil.matchfiles(repo, m + a + r + inclsubs)
1036 1036 if len(repo[None].parents()) > 1:
1037 1037 raise util.Abort(_('cannot manage merge changesets'))
1038 1038 commitfiles = m + a + r
1039 1039 self.checktoppatch(repo)
1040 1040 insert = self.fullseriesend()
1041 1041 wlock = repo.wlock()
1042 1042 try:
1043 1043 try:
1044 1044 # if patch file write fails, abort early
1045 1045 p = self.opener(patchfn, "w")
1046 1046 except IOError, e:
1047 1047 raise util.Abort(_('cannot write patch "%s": %s')
1048 1048 % (patchfn, e.strerror))
1049 1049 try:
1050 1050 if self.plainmode:
1051 1051 if user:
1052 1052 p.write("From: " + user + "\n")
1053 1053 if not date:
1054 1054 p.write("\n")
1055 1055 if date:
1056 1056 p.write("Date: %d %d\n\n" % date)
1057 1057 else:
1058 1058 p.write("# HG changeset patch\n")
1059 1059 p.write("# Parent "
1060 1060 + hex(repo[None].p1().node()) + "\n")
1061 1061 if user:
1062 1062 p.write("# User " + user + "\n")
1063 1063 if date:
1064 1064 p.write("# Date %s %s\n\n" % date)
1065 1065 if util.safehasattr(msg, '__call__'):
1066 1066 msg = msg()
1067 1067 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
1068 1068 n = newcommit(repo, None, commitmsg, user, date, match=match,
1069 1069 force=True)
1070 1070 if n is None:
1071 1071 raise util.Abort(_("repo commit failed"))
1072 1072 try:
1073 1073 self.fullseries[insert:insert] = [patchfn]
1074 1074 self.applied.append(statusentry(n, patchfn))
1075 1075 self.parseseries()
1076 1076 self.seriesdirty = True
1077 1077 self.applieddirty = True
1078 1078 if msg:
1079 1079 msg = msg + "\n\n"
1080 1080 p.write(msg)
1081 1081 if commitfiles:
1082 1082 parent = self.qparents(repo, n)
1083 1083 if inclsubs:
1084 1084 self.putsubstate2changes(substatestate, changes)
1085 1085 chunks = patchmod.diff(repo, node1=parent, node2=n,
1086 1086 changes=changes, opts=diffopts)
1087 1087 for chunk in chunks:
1088 1088 p.write(chunk)
1089 1089 p.close()
1090 1090 r = self.qrepo()
1091 1091 if r:
1092 1092 r[None].add([patchfn])
1093 1093 except: # re-raises
1094 1094 repo.rollback()
1095 1095 raise
1096 1096 except Exception:
1097 1097 patchpath = self.join(patchfn)
1098 1098 try:
1099 1099 os.unlink(patchpath)
1100 1100 except OSError:
1101 1101 self.ui.warn(_('error unlinking %s\n') % patchpath)
1102 1102 raise
1103 1103 self.removeundo(repo)
1104 1104 finally:
1105 1105 release(wlock)
1106 1106
def strip(self, repo, revs, update=True, backup="all", force=None):
    """Remove ``revs`` from the repository via repair.strip, optionally
    moving the working directory to qparent first."""
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        if update:
            # refuse local changes (unless force) before moving the
            # working directory off the revisions being stripped
            self.checklocalchanges(repo, force=force, refresh=False)
            urev = self.qparents(repo, revs[0])
            hg.clean(repo, urev)
            repo.dirstate.write()
        repair.strip(self.ui, repo, revs, backup)
    finally:
        release(lock, wlock)
1122 1122
def isapplied(self, patch):
    """returns (index, rev, patch) if *patch* is applied, else None"""
    for idx, entry in enumerate(self.applied):
        if entry.name == patch:
            return (idx, entry.node, entry.name)
    return None
1129 1129
def lookup(self, patch, strict=False):
    """Resolve *patch* to an entry of the series file.

    If the exact patch name does not exist, a few variations are
    tried (only #1 when *strict* is passed):

    1) a number (as string) to indicate an offset in the series file
    2) a unique substring of the patch name was given
    3) patchname[-+]num to indicate an offset in the series file

    While patches are applied, the aliases 'qtip' and 'qbase' are
    also recognized.  Raises util.Abort when nothing matches.
    """
    # NOTE(review): the pasted source carried both the pre- and
    # post-whitespace-fix versions of the 'qtip' return line (a diff
    # rendering artifact); only the current one is kept here.
    def partialname(s):
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        if self.series and self.applied:
            if s == 'qtip':
                return self.series[self.seriesend(True) - 1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch in self.series:
        return patch

    if not os.path.isfile(self.join(patch)):
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if -len(self.series) <= sno < len(self.series):
                return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            minus = patch.rfind('-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            plus = patch.rfind('+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
1196 1196
1197 1197 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1198 1198 all=False, move=False, exact=False, nobackup=False,
1199 1199 keepchanges=False):
1200 1200 self.checkkeepchanges(keepchanges, force)
1201 1201 diffopts = self.diffopts()
1202 1202 wlock = repo.wlock()
1203 1203 try:
1204 1204 heads = []
1205 1205 for b, ls in repo.branchmap().iteritems():
1206 1206 heads += ls
1207 1207 if not heads:
1208 1208 heads = [nullid]
1209 1209 if repo.dirstate.p1() not in heads and not exact:
1210 1210 self.ui.status(_("(working directory not at a head)\n"))
1211 1211
1212 1212 if not self.series:
1213 1213 self.ui.warn(_('no patches in series\n'))
1214 1214 return 0
1215 1215
1216 1216 # Suppose our series file is: A B C and the current 'top'
1217 1217 # patch is B. qpush C should be performed (moving forward)
1218 1218 # qpush B is a NOP (no change) qpush A is an error (can't
1219 1219 # go backwards with qpush)
1220 1220 if patch:
1221 1221 patch = self.lookup(patch)
1222 1222 info = self.isapplied(patch)
1223 1223 if info and info[0] >= len(self.applied) - 1:
1224 1224 self.ui.warn(
1225 1225 _('qpush: %s is already at the top\n') % patch)
1226 1226 return 0
1227 1227
1228 1228 pushable, reason = self.pushable(patch)
1229 1229 if pushable:
1230 1230 if self.series.index(patch) < self.seriesend():
1231 1231 raise util.Abort(
1232 1232 _("cannot push to a previous patch: %s") % patch)
1233 1233 else:
1234 1234 if reason:
1235 1235 reason = _('guarded by %s') % reason
1236 1236 else:
1237 1237 reason = _('no matching guards')
1238 1238 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1239 1239 return 1
1240 1240 elif all:
1241 1241 patch = self.series[-1]
1242 1242 if self.isapplied(patch):
1243 1243 self.ui.warn(_('all patches are currently applied\n'))
1244 1244 return 0
1245 1245
1246 1246 # Following the above example, starting at 'top' of B:
1247 1247 # qpush should be performed (pushes C), but a subsequent
1248 1248 # qpush without an argument is an error (nothing to
1249 1249 # apply). This allows a loop of "...while hg qpush..." to
1250 1250 # work as it detects an error when done
1251 1251 start = self.seriesend()
1252 1252 if start == len(self.series):
1253 1253 self.ui.warn(_('patch series already fully applied\n'))
1254 1254 return 1
1255 1255 if not force and not keepchanges:
1256 1256 self.checklocalchanges(repo, refresh=self.applied)
1257 1257
1258 1258 if exact:
1259 1259 if keepchanges:
1260 1260 raise util.Abort(
1261 1261 _("cannot use --exact and --keep-changes together"))
1262 1262 if move:
1263 1263 raise util.Abort(_('cannot use --exact and --move '
1264 1264 'together'))
1265 1265 if self.applied:
1266 1266 raise util.Abort(_('cannot push --exact with applied '
1267 1267 'patches'))
1268 1268 root = self.series[start]
1269 1269 target = patchheader(self.join(root), self.plainmode).parent
1270 1270 if not target:
1271 1271 raise util.Abort(
1272 1272 _("%s does not have a parent recorded") % root)
1273 1273 if not repo[target] == repo['.']:
1274 1274 hg.update(repo, target)
1275 1275
1276 1276 if move:
1277 1277 if not patch:
1278 1278 raise util.Abort(_("please specify the patch to move"))
1279 1279 for fullstart, rpn in enumerate(self.fullseries):
1280 1280 # strip markers for patch guards
1281 1281 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1282 1282 break
1283 1283 for i, rpn in enumerate(self.fullseries[fullstart:]):
1284 1284 # strip markers for patch guards
1285 1285 if self.guard_re.split(rpn, 1)[0] == patch:
1286 1286 break
1287 1287 index = fullstart + i
1288 1288 assert index < len(self.fullseries)
1289 1289 fullpatch = self.fullseries[index]
1290 1290 del self.fullseries[index]
1291 1291 self.fullseries.insert(fullstart, fullpatch)
1292 1292 self.parseseries()
1293 1293 self.seriesdirty = True
1294 1294
1295 1295 self.applieddirty = True
1296 1296 if start > 0:
1297 1297 self.checktoppatch(repo)
1298 1298 if not patch:
1299 1299 patch = self.series[start]
1300 1300 end = start + 1
1301 1301 else:
1302 1302 end = self.series.index(patch, start) + 1
1303 1303
1304 1304 tobackup = set()
1305 1305 if (not nobackup and force) or keepchanges:
1306 1306 m, a, r, d = self.checklocalchanges(repo, force=True)
1307 1307 if keepchanges:
1308 1308 tobackup.update(m + a + r + d)
1309 1309 else:
1310 1310 tobackup.update(m + a)
1311 1311
1312 1312 s = self.series[start:end]
1313 1313 all_files = set()
1314 1314 try:
1315 1315 if mergeq:
1316 1316 ret = self.mergepatch(repo, mergeq, s, diffopts)
1317 1317 else:
1318 1318 ret = self.apply(repo, s, list, all_files=all_files,
1319 1319 tobackup=tobackup, keepchanges=keepchanges)
1320 1320 except: # re-raises
1321 1321 self.ui.warn(_('cleaning up working directory...'))
1322 1322 node = repo.dirstate.p1()
1323 1323 hg.revert(repo, node, None)
1324 1324 # only remove unknown files that we know we touched or
1325 1325 # created while patching
1326 1326 for f in all_files:
1327 1327 if f not in repo.dirstate:
1328 1328 try:
1329 1329 util.unlinkpath(repo.wjoin(f))
1330 1330 except OSError, inst:
1331 1331 if inst.errno != errno.ENOENT:
1332 1332 raise
1333 1333 self.ui.warn(_('done\n'))
1334 1334 raise
1335 1335
1336 1336 if not self.applied:
1337 1337 return ret[0]
1338 1338 top = self.applied[-1].name
1339 1339 if ret[0] and ret[0] > 1:
1340 1340 msg = _("errors during apply, please fix and refresh %s\n")
1341 1341 self.ui.write(msg % top)
1342 1342 else:
1343 1343 self.ui.write(_("now at: %s\n") % top)
1344 1344 return ret[0]
1345 1345
1346 1346 finally:
1347 1347 wlock.release()
1348 1348
1349 1349 def pop(self, repo, patch=None, force=False, update=True, all=False,
1350 1350 nobackup=False, keepchanges=False):
1351 1351 self.checkkeepchanges(keepchanges, force)
1352 1352 wlock = repo.wlock()
1353 1353 try:
1354 1354 if patch:
1355 1355 # index, rev, patch
1356 1356 info = self.isapplied(patch)
1357 1357 if not info:
1358 1358 patch = self.lookup(patch)
1359 1359 info = self.isapplied(patch)
1360 1360 if not info:
1361 1361 raise util.Abort(_("patch %s is not applied") % patch)
1362 1362
1363 1363 if not self.applied:
1364 1364 # Allow qpop -a to work repeatedly,
1365 1365 # but not qpop without an argument
1366 1366 self.ui.warn(_("no patches applied\n"))
1367 1367 return not all
1368 1368
1369 1369 if all:
1370 1370 start = 0
1371 1371 elif patch:
1372 1372 start = info[0] + 1
1373 1373 else:
1374 1374 start = len(self.applied) - 1
1375 1375
1376 1376 if start >= len(self.applied):
1377 1377 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1378 1378 return
1379 1379
1380 1380 if not update:
1381 1381 parents = repo.dirstate.parents()
1382 1382 rr = [x.node for x in self.applied]
1383 1383 for p in parents:
1384 1384 if p in rr:
1385 1385 self.ui.warn(_("qpop: forcing dirstate update\n"))
1386 1386 update = True
1387 1387 else:
1388 1388 parents = [p.node() for p in repo[None].parents()]
1389 1389 needupdate = False
1390 1390 for entry in self.applied[start:]:
1391 1391 if entry.node in parents:
1392 1392 needupdate = True
1393 1393 break
1394 1394 update = needupdate
1395 1395
1396 1396 tobackup = set()
1397 1397 if update:
1398 1398 m, a, r, d = self.checklocalchanges(
1399 1399 repo, force=force or keepchanges)
1400 1400 if force:
1401 1401 if not nobackup:
1402 1402 tobackup.update(m + a)
1403 1403 elif keepchanges:
1404 1404 tobackup.update(m + a + r + d)
1405 1405
1406 1406 self.applieddirty = True
1407 1407 end = len(self.applied)
1408 1408 rev = self.applied[start].node
1409 1409 if update:
1410 1410 top = self.checktoppatch(repo)[0]
1411 1411
1412 1412 try:
1413 1413 heads = repo.changelog.heads(rev)
1414 1414 except error.LookupError:
1415 1415 node = short(rev)
1416 1416 raise util.Abort(_('trying to pop unknown node %s') % node)
1417 1417
1418 1418 if heads != [self.applied[-1].node]:
1419 1419 raise util.Abort(_("popping would remove a revision not "
1420 1420 "managed by this patch queue"))
1421 1421 if not repo[self.applied[-1].node].mutable():
1422 1422 raise util.Abort(
1423 1423 _("popping would remove an immutable revision"),
1424 1424 hint=_('see "hg help phases" for details'))
1425 1425
1426 1426 # we know there are no local changes, so we can make a simplified
1427 1427 # form of hg.update.
1428 1428 if update:
1429 1429 qp = self.qparents(repo, rev)
1430 1430 ctx = repo[qp]
1431 1431 m, a, r, d = repo.status(qp, top)[:4]
1432 1432 if d:
1433 1433 raise util.Abort(_("deletions found between repo revs"))
1434 1434
1435 1435 tobackup = set(a + m + r) & tobackup
1436 1436 if keepchanges and tobackup:
1437 1437 self.localchangesfound()
1438 1438 self.backup(repo, tobackup)
1439 1439
1440 1440 for f in a:
1441 1441 try:
1442 1442 util.unlinkpath(repo.wjoin(f))
1443 1443 except OSError, e:
1444 1444 if e.errno != errno.ENOENT:
1445 1445 raise
1446 1446 repo.dirstate.drop(f)
1447 1447 for f in m + r:
1448 1448 fctx = ctx[f]
1449 1449 repo.wwrite(f, fctx.data(), fctx.flags())
1450 1450 repo.dirstate.normal(f)
1451 1451 repo.setparents(qp, nullid)
1452 1452 for patch in reversed(self.applied[start:end]):
1453 1453 self.ui.status(_("popping %s\n") % patch.name)
1454 1454 del self.applied[start:end]
1455 1455 self.strip(repo, [rev], update=False, backup='strip')
1456 1456 if self.applied:
1457 1457 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1458 1458 else:
1459 1459 self.ui.write(_("patch queue now empty\n"))
1460 1460 finally:
1461 1461 wlock.release()
1462 1462
def diff(self, repo, pats, opts):
    """Print the diff of the topmost applied patch (qdiff)."""
    top, patch = self.checktoppatch(repo)
    if not top:
        self.ui.write(_("no patches applied\n"))
        return
    qp = self.qparents(repo, top)
    node1, node2 = qp, None
    if opts.get('reverse'):
        node1, node2 = None, qp
    diffopts = self.diffopts(opts, patch)
    self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1475 1475
def refresh(self, repo, pats=None, **opts):
    """Fold the current working-directory changes into the topmost
    applied patch (qrefresh), rewriting both the patch file and the
    qtip changeset.  Returns 1 when no patch is applied.

    NOTE(review): the pasted source contained both the old and new
    version of the reversed-iteration ``for`` header (a diff-viewer
    artifact that would have produced an unintended nested loop);
    only the current line is kept here.
    """
    if not self.applied:
        self.ui.write(_("no patches applied\n"))
        return 1
    msg = opts.get('msg', '').rstrip()
    newuser = opts.get('user')
    newdate = opts.get('date')
    if newdate:
        newdate = '%d %d' % util.parsedate(newdate)
    wlock = repo.wlock()

    try:
        self.checktoppatch(repo)
        (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
        if repo.changelog.heads(top) != [top]:
            raise util.Abort(_("cannot refresh a revision with children"))
        if not repo[top].mutable():
            raise util.Abort(_("cannot refresh immutable revision"),
                             hint=_('see "hg help phases" for details'))

        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)

        inclsubs = self.checksubstate(repo, hex(patchparent))
        if inclsubs:
            inclsubs.append('.hgsubstate')
            substatestate = repo.dirstate['.hgsubstate']

        ph = patchheader(self.join(patchfn), self.plainmode)
        diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
        if msg:
            ph.setmessage(msg)
        if newuser:
            ph.setuser(newuser)
        if newdate:
            ph.setdate(newdate)
        ph.setparent(hex(patchparent))

        # only commit new patch when write is complete
        patchf = self.opener(patchfn, 'w', atomictemp=True)

        comments = str(ph)
        if comments:
            patchf.write(comments)

        # update the dirstate in place, strip off the qtip commit
        # and then commit.
        #
        # this should really read:
        #   mm, dd, aa = repo.status(top, patchparent)[:3]
        # but we do it backwards to take advantage of manifest/changelog
        # caching against the next repo.status call
        mm, aa, dd = repo.status(patchparent, top)[:3]
        changes = repo.changelog.read(top)
        man = repo.manifest.read(changes[0])
        aaa = aa[:]
        matchfn = scmutil.match(repo[None], pats, opts)
        # in short mode, we only diff the files included in the
        # patch already plus specified files
        if opts.get('short'):
            # if amending a patch, we start with existing
            # files plus specified files - unfiltered
            match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
            # filter with include/exclude options
            matchfn = scmutil.match(repo[None], opts=opts)
        else:
            match = scmutil.matchall(repo)
        m, a, r, d = repo.status(match=match)[:4]
        mm = set(mm)
        aa = set(aa)
        dd = set(dd)

        # we might end up with files that were added between
        # qtip and the dirstate parent, but then changed in the
        # local dirstate. in this case, we want them to only
        # show up in the added section
        for x in m:
            if x not in aa:
                mm.add(x)
        # we might end up with files added by the local dirstate that
        # were deleted by the patch. In this case, they should only
        # show up in the changed section.
        for x in a:
            if x in dd:
                dd.remove(x)
                mm.add(x)
            else:
                aa.add(x)
        # make sure any files deleted in the local dirstate
        # are not in the add or change column of the patch
        forget = []
        for x in d + r:
            if x in aa:
                aa.remove(x)
                forget.append(x)
                continue
            else:
                mm.discard(x)
            dd.add(x)

        m = list(mm)
        r = list(dd)
        a = list(aa)

        # create 'match' that includes the files to be recommitted.
        # apply matchfn via repo.status to ensure correct case handling.
        cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
        allmatches = set(cm + ca + cr + cd)
        refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

        files = set(inclsubs)
        for x in refreshchanges:
            files.update(x)
        match = scmutil.matchfiles(repo, files)

        bmlist = repo[top].bookmarks()

        try:
            if diffopts.git or diffopts.upgrade:
                copies = {}
                for dst in a:
                    src = repo.dirstate.copied(dst)
                    # during qfold, the source file for copies may
                    # be removed. Treat this as a simple add.
                    if src is not None and src in repo.dirstate:
                        copies.setdefault(src, []).append(dst)
                    repo.dirstate.add(dst)
                # remember the copies between patchparent and qtip
                for dst in aaa:
                    f = repo.file(dst)
                    src = f.renamed(man[dst])
                    if src:
                        copies.setdefault(src[0], []).extend(
                            copies.get(dst, []))
                        if dst in a:
                            copies[src[0]].append(dst)
                    # we can't copy a file created by the patch itself
                    if dst in copies:
                        del copies[dst]
                for src, dsts in copies.iteritems():
                    for dst in dsts:
                        repo.dirstate.copy(src, dst)
            else:
                for dst in a:
                    repo.dirstate.add(dst)
                # Drop useless copy information
                for f in list(repo.dirstate.copies()):
                    repo.dirstate.copy(None, f)
            for f in r:
                repo.dirstate.remove(f)
            # if the patch excludes a modified file, mark that
            # file with mtime=0 so status can see it.
            mm = []
            for i in xrange(len(m) - 1, -1, -1):
                if not matchfn(m[i]):
                    mm.append(m[i])
                    del m[i]
            for f in m:
                repo.dirstate.normal(f)
            for f in mm:
                repo.dirstate.normallookup(f)
            for f in forget:
                repo.dirstate.drop(f)

            if not msg:
                if not ph.message:
                    message = "[mq]: %s\n" % patchfn
                else:
                    message = "\n".join(ph.message)
            else:
                message = msg

            user = ph.user or changes[1]

            oldphase = repo[top].phase()

            # assumes strip can roll itself back if interrupted
            repo.setparents(*cparents)
            self.applied.pop()
            self.applieddirty = True
            self.strip(repo, [top], update=False,
                       backup='strip')
        except: # re-raises
            repo.dirstate.invalidate()
            raise

        try:
            # might be nice to attempt to roll back strip after this

            # Ensure we create a new changeset in the same phase than
            # the old one.
            n = newcommit(repo, oldphase, message, user, ph.date,
                          match=match, force=True)
            # only write patch after a successful commit
            c = [list(x) for x in refreshchanges]
            if inclsubs:
                self.putsubstate2changes(substatestate, c)
            chunks = patchmod.diff(repo, patchparent,
                                   changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)
            patchf.close()

            marks = repo._bookmarks
            for bm in bmlist:
                marks[bm] = n
            marks.write()

            self.applied.append(statusentry(n, patchfn))
        except: # re-raises
            ctx = repo[cparents[0]]
            repo.dirstate.rebuild(ctx.node(), ctx.manifest())
            self.savedirty()
            self.ui.warn(_('refresh interrupted while patch was popped! '
                           '(revert --all, qpush to recover)\n'))
            raise
    finally:
        wlock.release()
        self.removeundo(repo)
1695 1695
1696 1696 def init(self, repo, create=False):
1697 1697 if not create and os.path.isdir(self.path):
1698 1698 raise util.Abort(_("patch queue directory already exists"))
1699 1699 try:
1700 1700 os.mkdir(self.path)
1701 1701 except OSError, inst:
1702 1702 if inst.errno != errno.EEXIST or not create:
1703 1703 raise
1704 1704 if create:
1705 1705 return self.qrepo(create=True)
1706 1706
def unapplied(self, repo, patch=None):
    """Return [(index, name)] of pushable patches after *patch*
    (or after the current series position when no patch is given)."""
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        start = self.series.index(patch) + 1
    else:
        start = self.seriesend()
    unapplied = []
    for i in xrange(start, len(self.series)):
        pushable, reason = self.pushable(i)
        if pushable:
            unapplied.append((i, self.series[i]))
        self.explainpushable(i)
    return unapplied
1721 1721
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the patch series (applied/unapplied/guarded), or — with
    *missing* — patch files present on disk but absent from the series."""
    def displayname(pfx, patchname, state):
        # write one series line, colorized via qseries.* labels
        if pfx:
            self.ui.write(pfx)
        if summary:
            ph = patchheader(self.join(patchname), self.plainmode)
            msg = ph.message and ph.message[0] or ''
            if self.ui.formatted():
                width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                if width > 0:
                    msg = util.ellipsis(msg, width)
                else:
                    msg = ''
            self.ui.write(patchname, label='qseries.' + state)
            self.ui.write(': ')
            self.ui.write(msg, label='qseries.message.' + state)
        else:
            self.ui.write(patchname, label='qseries.' + state)
        self.ui.write('\n')

    applied = set([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        if self.ui.verbose:
            idxwidth = len(str(start + length - 1))
        for i in xrange(start, start + length):
            patch = self.series[i]
            if patch in applied:
                char, state = 'A', 'applied'
            elif self.pushable(i)[0]:
                char, state = 'U', 'unapplied'
            else:
                char, state = 'G', 'guarded'
            pfx = ''
            if self.ui.verbose:
                pfx = '%*d %s ' % (idxwidth, i, char)
            elif status and status != char:
                continue
            displayname(pfx, patch, state)
    else:
        msng_list = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.statuspath, self.seriespath,
                               self.guardspath)
                    and not fl.startswith('.')):
                    msng_list.append(fl)
        for x in sorted(msng_list):
            pfx = self.ui.verbose and ('D ') or ''
            displayname(pfx, x, 'missing')
1777 1777
def issaveline(self, l):
    # True for the synthetic qsave marker entry; callers rely on the
    # truthy-True / implicit-None contract, so no explicit False here.
    if l.name == '.hg.patches.save.line':
        return True
1781 1781
def qrepo(self, create=False):
    """Return the versioned patch-queue repository, or None when the
    queue directory is not itself a repository (and *create* is off)."""
    ui = self.ui.copy()
    # the queue repo must not inherit the parent repo's push/pull paths
    ui.setconfig('paths', 'default', '', overlay=False)
    ui.setconfig('paths', 'default-push', '', overlay=False)
    if create or os.path.isdir(self.join(".hg")):
        return hg.repository(ui, path=self.path, create=create)
1788 1788
def restore(self, repo, rev, delete=None, qupdate=None):
    """Restore queue state saved by qsave in changeset *rev*.

    Parses the saved series/applied/Dirstate data out of the commit
    description; optionally deletes the save entry and updates the
    queue repository to its recorded parent.
    """
    desc = repo[rev].description().strip()
    lines = desc.splitlines()
    i = 0
    datastart = None
    series = []
    applied = []
    qpp = None
    for i, line in enumerate(lines):
        if line == 'Patch Data:':
            datastart = i + 1
        elif line.startswith('Dirstate:'):
            l = line.rstrip()
            l = l[10:].split(' ')
            qpp = [bin(x) for x in l]
        elif datastart is not None:
            # "node:name" lines are applied patches; bare names are series
            l = line.rstrip()
            n, name = l.split(':', 1)
            if n:
                applied.append(statusentry(bin(n), name))
            else:
                series.append(l)
    if datastart is None:
        self.ui.warn(_("no saved patch data found\n"))
        return 1
    self.ui.warn(_("restoring status: %s\n") % lines[0])
    self.fullseries = series
    self.applied = applied
    self.parseseries()
    self.seriesdirty = True
    self.applieddirty = True
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            self.ui.warn(_("save entry has children, leaving it alone\n"))
        else:
            self.ui.warn(_("removing save entry %s\n") % short(rev))
            pp = repo.dirstate.parents()
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, [rev], update=update, backup='strip')
    if qpp:
        self.ui.warn(_("saved queue repository parents: %s %s\n") %
                     (short(qpp[0]), short(qpp[1])))
        if qupdate:
            self.ui.status(_("updating queue directory\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn(_("unable to load queue repository\n"))
                return 1
            hg.clean(r, qpp[0])
1842 1842
    def save(self, repo, msg=None):
        """Snapshot the current queue state into a changeset.

        Creates a commit whose description encodes the applied patches,
        the full series and (if available) the patch repository parents
        -- the exact format parsed back by restore().  A sentinel
        '.hg.patches.save.line' entry is then appended to the applied
        list so the snapshot can be recognized later.
        Returns 1 on failure, None on success.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record the patch repo parents so restore() can put the
            # queue repository back onto them
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
1869 1869
1870 1870 def fullseriesend(self):
1871 1871 if self.applied:
1872 1872 p = self.applied[-1].name
1873 1873 end = self.findseries(p)
1874 1874 if end is None:
1875 1875 return len(self.fullseries)
1876 1876 return end + 1
1877 1877 return 0
1878 1878
1879 1879 def seriesend(self, all_patches=False):
1880 1880 """If all_patches is False, return the index of the next pushable patch
1881 1881 in the series, or the series length. If all_patches is True, return the
1882 1882 index of the first patch past the last applied one.
1883 1883 """
1884 1884 end = 0
1885 1885 def next(start):
1886 1886 if all_patches or start >= len(self.series):
1887 1887 return start
1888 1888 for i in xrange(start, len(self.series)):
1889 1889 p, reason = self.pushable(i)
1890 1890 if p:
1891 1891 return i
1892 1892 self.explainpushable(i)
1893 1893 return len(self.series)
1894 1894 if self.applied:
1895 1895 p = self.applied[-1].name
1896 1896 try:
1897 1897 end = self.series.index(p)
1898 1898 except ValueError:
1899 1899 return 0
1900 1900 return next(end + 1)
1901 1901 return next(end)
1902 1902
1903 1903 def appliedname(self, index):
1904 1904 pname = self.applied[index].name
1905 1905 if not self.ui.verbose:
1906 1906 p = pname
1907 1907 else:
1908 1908 p = str(self.series.index(pname)) + " " + pname
1909 1909 return p
1910 1910
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        Two modes: *files* copies patch files into the patch directory
        (or, with *existing*, registers files already there), while *rev*
        places existing repository revisions under mq control.  With
        *patchname*, a single import is stored under that name.  Returns
        the list of imported patch names.
        """
        def checkseries(patchname):
            # refuse names that would collide with a series entry
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = scmutil.revrange(repo, rev)
            # process from newest to oldest so insert(0, ...) below keeps
            # the queue ordered
            rev.sort(reverse=True)
        elif not files:
            raise util.Abort(_('no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = repo.changelog.node(rev[0])
                if base in [n.node for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [self.applied[-1].node]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            diffopts = self.diffopts({'git': git})
            for r in rev:
                if not repo[r].mutable():
                    raise util.Abort(_('revision %d is not mutable') % r,
                                     hint=_('see "hg help phases" for details'))
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                # enforce linearity: each revision must be the parent of
                # the previously imported (newer) one
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                self.checkpatchname(patchname, force)
                self.fullseries.insert(0, patchname)

                # write the revision out as a patch file
                patchf = self.opener(patchname, "w")
                cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                patchf.close()

                se = statusentry(n, patchname)
                self.applied.insert(0, se)

                self.added.append(patchname)
                imported.append(patchname)
                patchname = None
            if rev and repo.ui.configbool('mq', 'secret', False):
                # if we added anything with --rev, we must move the secret root
                # ('n' is the last node written by the loop above)
                phases.retractboundary(repo, phases.secret, [n])
            self.parseseries()
            self.applieddirty = True
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                # register a patch file already in the patch directory
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                filename = normname(filename)
                self.checkreservedname(filename)
                originpath = self.join(filename)
                if not os.path.isfile(originpath):
                    raise util.Abort(_("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                        % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                # copy the patch content (from a file or stdin) into the
                # patch directory
                if filename == '-' and not patchname:
                    raise util.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch, offset by how many
                # files we have imported so far
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2042 2042
def fixkeepchangesopts(ui, opts):
    """Return *opts* with keep_changes forced on when mq.keepchanges is set.

    The original mapping is returned untouched when the config knob is
    off or when --force/--exact was given; otherwise a copy carrying
    keep_changes=True is returned.
    """
    keep = ui.configbool('mq', 'keepchanges')
    if keep and not opts.get('force') and not opts.get('exact'):
        newopts = dict(opts)
        newopts['keep_changes'] = True
        return newopts
    return opts
2050 2050
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    # all validation (applied/unknown patches) happens inside q.delete()
    q = repo.mq
    q.delete(repo, patches, opts)
    # persist the updated series/status files
    q.savedirty()
    return 0
2069 2069
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    # List up to (and including) the named patch, or up to the first
    # patch past the last applied one.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    start = 0
    if opts.get('last'):
        # -1/--last: show only the patch applied just before the top one
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = end - 2
        end = 1

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2102 2102
2103 2103
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    # Start listing just past the named patch, or past the last applied one.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if opts.get('first'):
        if start == len(q.series):
            ui.write(_("all patches applied\n"))
            return 1
        length = 1
    else:
        length = None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2127 2127
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes. Use :hg:`qfinish` to remove changesets from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    lock = repo.lock() # importing with --rev may move the phase boundary
    try:
        q = repo.mq
        try:
            imported = q.qimport(
                repo, filename, patchname=opts.get('name'),
                existing=opts.get('existing'), force=opts.get('force'),
                rev=opts.get('rev'), git=opts.get('git'))
        finally:
            # always persist whatever qimport managed to record
            q.savedirty()
    finally:
        lock.release()

    if imported and opts.get('push') and not opts.get('rev'):
        # -P/--push: immediately push the last imported patch
        return q.push(repo, imported[-1])
    return 0
2187 2187
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if not r:
        return 0
    # Seed the freshly created (or converted) patch repository with its
    # ignore and series files, then schedule both for addition.
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                     'status\n', 'guards\n'):
            fp.write(line)
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r[None].add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
2213 2213
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # thin deprecated wrapper: all the work happens in qinit()
    return qinit(ui, repo, create=opts.get('create_repo'))
2229 2229
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only) -- probe it early so we fail before
    # cloning anything when there is no versioned patch repository
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None here, so repo[qbase] is the
        # working context -- this looks like it was meant to check the
        # phase of the first applied patch; confirm before changing.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                # clone only up to below qbase for remote destinations
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            # a local destination may have received the applied patches;
            # strip them so the clone starts with a clean queue
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            repo.mq.strip(repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2315 2315
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    # run the plain commit command against the patch repository
    commands.commit(r.ui, r, *pats, **opts)
2328 2328
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    # delegate straight to the queue object; -m lists patch-directory
    # files that are not in the series file
    repo.mq.qseries(repo, missing=opts.get('missing'),
                    summary=opts.get('summary'))
    return 0
2340 2340
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # Index just past the last applied patch; 0 when nothing is applied.
    # Written explicitly instead of the fragile 'cond and a or b' idiom,
    # which only worked here by accident when seriesend() returned 0.
    if q.applied:
        t = q.seriesend(True)
    else:
        t = 0
    if t:
        q.qseries(repo, start=t - 1, length=1, status='A',
                  summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1
2354 2354
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    pos = q.seriesend()
    # seriesend() returns the series length when nothing is left to push
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
2366 2366
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # locate the patch just below the top of the applied stack
    pos = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=pos, length=1, status='A',
              summary=opts.get('summary'))
2383 2383
def setupheaderopts(ui, opts):
    """Fill in opts['user'] / opts['date'] in place for -U/-D.

    When --currentuser/--currentdate was given without an explicit
    value, default to the configured username and the current date.
    """
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2389 2389
@command("^qnew",
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(ui, opts)
    def getmsg():
        # deferred so the editor is only launched once q.new() actually
        # commits to creating the patch
        return ui.edit(msg, opts.get('user') or ui.username())
    q = repo.mq
    # Pass either the literal message or the lazy editor callback.  (The
    # redundant unconditional 'opts[msg] = msg' dead store that preceded
    # this test was removed; both branches always assign.)
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0
2440 2440
@command("^qrefresh",
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        repo.savecommitmessage(message)
    setupheaderopts(ui, opts)
    wlock = repo.wlock()
    try:
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
    finally:
        wlock.release()
2498 2498
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # the queue object computes the combined qtip + working-dir diff
    repo.mq.diff(repo, pats, opts)
    return 0
2519 2519
@command('qfold',
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
            # actually skip it: previously the duplicate (or the current
            # top patch itself) was still appended and folded despite
            # the warning above
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect folded patch headers; they are concatenated with
            # the current header below, separated by '* * *'
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    wlock = repo.wlock()
    try:
        q.refresh(repo, msg=message, git=diffopts.git)
        q.delete(repo, patches, opts)
        q.savedirty()
    finally:
        wlock.release()
2592 2592
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    # Pop if the target is already applied (it sits below the top),
    # push otherwise; both operations take the same keyword arguments.
    if q.isapplied(patch):
        move = q.pop
    else:
        move = q.push
    ret = move(repo, patch, force=opts.get('force'),
               nobackup=opts.get('no_backup'),
               keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
2616 2616
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::
       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # print one series entry and its guards, labelled by state
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise util.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # A leading '+'/'-' means the first argument is a guard, not a patch
    # name; in that case default to the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # setting (or with -n, dropping) guards for the patch
        idx = q.findseries(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # no guard arguments: just report the patch's current guards
        status(q.series.index(q.lookup(patch)))
2690 2690
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq
    if not patch:
        # no patch named: default to the topmost applied patch
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = 'qtip'
    patch = q.lookup(patch)
    ph = patchheader(q.join(patch), q.plainmode)
    ui.write('\n'.join(ph.message) + '\n')
2708 2708
def lastsavename(path):
    """Find the most recent save of *path* (a sibling named "<base>.N").

    Scans path's directory for entries matching the save pattern and
    returns a (fullpath, index) pair for the one with the largest N,
    or (None, None) when no save exists.
    """
    directory, base = os.path.split(path)
    # NOTE(review): base is interpolated unescaped and the '.' matches
    # any character, so unusual base names may over-match -- kept as-is.
    pattern = re.compile("%s.([0-9]+)" % base)
    bestname = None
    bestindex = None
    for candidate in os.listdir(directory):
        m = pattern.match(candidate)
        if m is None:
            continue
        index = int(m.group(1))
        if bestindex is None or index > bestindex:
            bestname = candidate
            bestindex = index
    if not bestname:
        return (None, None)
    return (os.path.join(directory, bestname), bestindex)
2725 2725
def savename(path):
    """Return the name for the next save of *path*: "<path>.<N+1>"."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save exists: number the first one ".1"
        index = 0
    return "%s.%d" % (path, index + 1)
2732 2732
@command("^qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    opts = fixkeepchangesopts(ui, opts)
    mergeq = None
    if opts.get('merge'):
        # deprecated merge mode: resolve the queue to merge from --
        # either an explicitly named one, or the latest save of this one
        mergename = opts.get('name')
        if mergename:
            savepath = repo.join(mergename)
        else:
            savepath, i = lastsavename(q.path)
        if not savepath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.path, savepath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts.get('force'),
                  list=opts.get('list'), mergeq=mergeq, all=opts.get('all'),
                  move=opts.get('move'), exact=opts.get('exact'),
                  nobackup=opts.get('no_backup'),
                  keepchanges=opts.get('keep_changes'))
2777 2777
@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = fixkeepchangesopts(ui, opts)
    qname = opts.get('name')
    if qname:
        # popping from a named (foreign) queue: leave the working
        # directory alone
        q = queue(ui, repo.path, repo.join(qname))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
2814 2814
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        # single-argument form: rename the current (topmost) patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into an existing directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # rewrite the series entry, preserving any '#guard' annotations
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # if the patch is applied, update the status file entry too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # mirror the rename in the versioned patch queue repository, if any
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # patch was only scheduled for add: re-add under new name
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                # tracked patch: record as a copy so history follows
                wctx.copy(patch, name)
                wctx.forget([patch])
        finally:
            wlock.release()

    q.savedirty()
2872 2872
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    node = repo.lookup(rev)
    q.restore(repo, node,
              delete=opts.get('delete'), qupdate=opts.get('update'))
    q.savedirty()
    return 0
2887 2887
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    # record the queue state as a changeset; non-zero means it failed
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # -c/--copy: snapshot the patch directory next to the original
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            # no explicit name: pick the next numbered save name
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # -e/--empty: clear the applied-patch status file
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
2925 2925
@command("strip",
         [
          ('r', 'rev', [], _('strip specified revision (optional, '
                               'can specify revisions without this '
                               'option)'), _('REV')),
          ('f', 'force', None, _('force removal of changesets, discard '
                                 'uncommitted changes (no backup)')),
          ('b', 'backup', None, _('bundle only changesets with local revision'
                                  ' number greater than REV which are not'
                                  ' descendants of REV (DEPRECATED)')),
          ('', 'no-backup', None, _('no backups')),
          ('', 'nobackup', None, _('no backups (DEPRECATED)')),
          ('n', '', None, _('ignored (DEPRECATED)')),
          ('k', 'keep', None, _("do not modify working copy during strip")),
          ('B', 'bookmark', '', _("remove revs only reachable from given"
                                  " bookmark"))],
         _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
def strip(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes, the
    operation is aborted unless the --force flag is supplied, in which
    case changes will be discarded.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Strip is not a history-rewriting operation and can be used on
    changesets in the public phase. But if the stripped changesets have
    been pushed to a remote repository you will likely pull them again.

    Return 0 on success.
    """
    # choose how much of the stripped history to bundle as a backup
    backup = 'all'
    if opts.get('backup'):
        backup = 'strip'
    elif opts.get('no_backup') or opts.get('nobackup'):
        backup = 'none'

    cl = repo.changelog
    revs = list(revs) + opts.get('rev')
    revs = set(scmutil.revrange(repo, revs))

    if opts.get('bookmark'):
        mark = opts.get('bookmark')
        marks = repo._bookmarks
        if mark not in marks:
            raise util.Abort(_("bookmark '%s' not found") % mark)

        # If the requested bookmark is not the only one pointing to a
        # a revision we have to only delete the bookmark and not strip
        # anything. revsets cannot detect that case.
        uniquebm = True
        for m, n in marks.iteritems():
            if m != mark and n == repo[mark].node():
                uniquebm = False
                break
        if uniquebm:
            # strip everything reachable only via this bookmark
            rsrevs = repo.revs("ancestors(bookmark(%s)) - "
                               "ancestors(head() and not bookmark(%s)) - "
                               "ancestors(bookmark() and not bookmark(%s))",
                               mark, mark, mark)
            revs.update(set(rsrevs))
        if not revs:
            del marks[mark]
            marks.write()
            ui.write(_("bookmark '%s' deleted\n") % mark)

    if not revs:
        raise util.Abort(_('empty revision set'))

    descendants = set(cl.descendants(revs))
    strippedrevs = revs.union(descendants)
    # roots: the minimal set of revs whose removal strips everything
    roots = revs.difference(descendants)

    update = False
    # if one of the wdir parent is stripped we'll need
    # to update away to an earlier revision
    for p in repo.dirstate.parents():
        if p != nullid and cl.rev(p) in strippedrevs:
            update = True
            break

    rootnodes = set(cl.node(r) for r in roots)

    q = repo.mq
    if q.applied:
        # refresh queue state if we're about to strip
        # applied patches
        if cl.rev(repo.lookup('qtip')) in strippedrevs:
            q.applieddirty = True
            start = 0
            end = len(q.applied)
            for i, statusentry in enumerate(q.applied):
                if statusentry.node in rootnodes:
                    # if one of the stripped roots is an applied
                    # patch, only part of the queue is stripped
                    start = i
                    break
            del q.applied[start:end]
            q.savedirty()

    # from here on, revs holds the root *nodes* to strip
    revs = list(rootnodes)
    if update and opts.get('keep'):
        wlock = repo.wlock()
        try:
            # -k/--keep: rewind the dirstate without touching the files
            urev = repo.mq.qparents(repo, revs[0])
            repo.dirstate.rebuild(urev, repo[urev].manifest())
            repo.dirstate.write()
            update = False
        finally:
            wlock.release()

    if opts.get('bookmark'):
        del marks[mark]
        marks.write()
        ui.write(_("bookmark '%s' deleted\n") % mark)

    repo.mq.strip(repo, revs, backup=backup, update=update,
                  force=opts.get('force'))

    return 0
3061 3061
@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable (negative guard)
        qguard bar.patch +stable (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    if args or opts.get('none'):
        # changing the active guard set: remember the before state so
        # we can report how many patches changed pushability
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s/--series: tally how often each guard appears in the series
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading '+'/'-' sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top patch so --reapply can push back to it
    reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # pop to just before the first applied patch that is now guarded
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, str(i - 1))
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3172 3172
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    applied = opts.get('applied')
    if not applied and not revrange:
        raise util.Abort(_('no revisions specified'))
    if applied:
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves the locking to its
    # caller to avoid deadlocking against wlock; this command is
    # responsible for taking the repo lock here.
    lock = repo.lock()
    try:
        q.finish(repo, revs)
        q.savedirty()
    finally:
        lock.release()
    return 0
3217 3217
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
         ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # file names (under .hg) used for queue bookkeeping
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    # name of the currently active queue, derived from its directory name
    def _getcurrent():
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    # True when no queue registry file exists yet
    def _noqueues():
        try:
            fh = repo.opener(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    # sorted list of all registered queue names (always includes current)
    def _getqueues():
        current = _getcurrent()

        try:
            fh = repo.opener(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    # switch the active queue, refusing while patches are applied
    def _setactive(name):
        if q.applied:
            raise util.Abort(_('new queue created, but cannot make active '
                               'as patches are applied'))
        _setactivenocheck(name)

    # record the active queue name ('patches' is implied by an empty file)
    def _setactivenocheck(name):
        fh = repo.opener(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    # append a queue name to the registry file
    def _addqueue(name):
        fh = repo.opener(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    # directory holding the named queue's patches
    def _queuedir(name):
        if name == 'patches':
            return repo.join('patches')
        else:
            return repo.join('patches-' + name)

    # queue names may not contain path or guard separator characters
    def _validname(name):
        for n in name:
            if n in ':\\/.':
                return False
        return True

    # remove a queue's registry entry (rewrites the file atomically)
    def _delete(name):
        if name not in existing:
            raise util.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise util.Abort(_('cannot delete currently active queue'))

        fh = repo.opener('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise util.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    existing = _getqueues()

    if opts.get('create'):
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)
        if _noqueues():
            _addqueue(_defaultqueue)
        _addqueue(name)
        _setactive(name)
    elif opts.get('rename'):
        current = _getcurrent()
        if name == current:
            raise util.Abort(_('can\'t rename "%s" to its current name') % name)
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)

        olddir = _queuedir(current)
        newdir = _queuedir(name)

        if os.path.exists(newdir):
            raise util.Abort(_('non-queue directory "%s" already exists') %
                             newdir)

        # rewrite the registry with the new name, moving the patch
        # directory when we pass the renamed entry
        fh = repo.opener('patches.queues.new', 'w')
        for queue in existing:
            if queue == current:
                fh.write('%s\n' % (name,))
                if os.path.exists(olddir):
                    util.rename(olddir, newdir)
            else:
                fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
        _setactivenocheck(name)
    elif opts.get('delete'):
        _delete(name)
    elif opts.get('purge'):
        # --purge also removes the patch directory from disk
        if name in existing:
            _delete(name)
        qdir = _queuedir(name)
        if os.path.exists(qdir):
            shutil.rmtree(qdir)
    else:
        # bare queue name: switch to it
        if name not in existing:
            raise util.Abort(_('use --create to create a new queue'))
        _setactive(name)
3391 3391
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    applied = repo.mq.applied
    if not applied:
        return roots
    # the root of the patch stack defaults to secret when mq.secret is
    # set, draft otherwise
    if repo.ui.configbool('mq', 'secret', False):
        mqphase = phases.secret
    else:
        mqphase = phases.draft
    qbase = repo[applied[0].node]
    roots[mqphase].add(qbase.node())
    return roots
3402 3402
def reposetup(ui, repo):
    # Wrap the repository class so mq state participates in core
    # operations: commit/push protection, q* tags, and branch caching.
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            # lazily instantiated patch queue for this repository
            return queue(self.ui, self.path)

        def abortifwdirpatched(self, errmsg, force=False):
            # raise errmsg if a working-directory parent is an applied
            # patch (unless force)
            if self.mq.applied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise util.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            # refuse plain commits on top of an applied patch
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, force, revs):
            # refuse to push non-secret applied patches
            if self.mq.applied and not force:
                outapplied = [e.node for e in self.mq.applied]
                if revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise util.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(force, revs)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # synthesize the qtip/qbase/qparent pseudo-tags
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                # real tags take precedence over patch names
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

        def _branchtags(self, partial, lrev):
            # only persist the branch cache for revisions below qbase so
            # mq history rewriting never invalidates saved cache entries
            q = self.mq
            cl = self.changelog
            qbase = None
            if not q.applied:
                if getattr(self, '_committingpatch', False):
                    # Committing a new patch, must be tip
                    qbase = len(cl) - 1
            else:
                qbasenode = q.applied[0].node
                try:
                    qbase = self.unfiltered().changelog.rev(qbasenode)
                except error.LookupError:
                    self.ui.warn(_('mq status file refers to unknown node %s\n')
                                 % short(qbasenode))
            if qbase is None:
                return super(mqrepo, self)._branchtags(partial, lrev)

            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
                self._updatebranchcache(partial, ctxgen)
                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            ctxgen = (self[r] for r in xrange(start, len(cl)))
            self._updatebranchcache(partial, ctxgen)

            return partial

    if repo.local():
        repo.__class__ = mqrepo

    repo._phasedefaults.append(mqphasedefaults)
3515 3515
3516 3516 def mqimport(orig, ui, repo, *args, **kwargs):
3517 3517 if (util.safehasattr(repo, 'abortifwdirpatched')
3518 3518 and not kwargs.get('no_commit', False)):
3519 3519 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3520 3520 kwargs.get('force'))
3521 3521 return orig(ui, repo, *args, **kwargs)
3522 3522
3523 3523 def mqinit(orig, ui, *args, **kwargs):
3524 3524 mq = kwargs.pop('mq', None)
3525 3525
3526 3526 if not mq:
3527 3527 return orig(ui, *args, **kwargs)
3528 3528
3529 3529 if args:
3530 3530 repopath = args[0]
3531 3531 if not hg.islocal(repopath):
3532 3532 raise util.Abort(_('only a local queue repository '
3533 3533 'may be initialized'))
3534 3534 else:
3535 3535 repopath = cmdutil.findrepo(os.getcwd())
3536 3536 if not repopath:
3537 3537 raise util.Abort(_('there is no Mercurial repository here '
3538 3538 '(.hg not found)'))
3539 3539 repo = hg.repository(ui, repopath)
3540 3540 return qinit(ui, repo, True)
3541 3541
3542 3542 def mqcommand(orig, ui, repo, *args, **kwargs):
3543 3543 """Add --mq option to operate on patch repository instead of main"""
3544 3544
3545 3545 # some commands do not like getting unknown options
3546 3546 mq = kwargs.pop('mq', None)
3547 3547
3548 3548 if not mq:
3549 3549 return orig(ui, repo, *args, **kwargs)
3550 3550
3551 3551 q = repo.mq
3552 3552 r = q.qrepo()
3553 3553 if not r:
3554 3554 raise util.Abort(_('no queue repository'))
3555 3555 return orig(r.ui, r, *args, **kwargs)
3556 3556
3557 3557 def summary(orig, ui, repo, *args, **kwargs):
3558 3558 r = orig(ui, repo, *args, **kwargs)
3559 3559 q = repo.mq
3560 3560 m = []
3561 3561 a, u = len(q.applied), len(q.unapplied(repo))
3562 3562 if a:
3563 3563 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3564 3564 if u:
3565 3565 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3566 3566 if m:
3567 3567 # i18n: column positioning for "hg summary"
3568 3568 ui.write(_("mq: %s\n") % ', '.join(m))
3569 3569 else:
3570 3570 # i18n: column positioning for "hg summary"
3571 3571 ui.note(_("mq: (empty queue)\n"))
3572 3572 return r
3573 3573
3574 3574 def revsetmq(repo, subset, x):
3575 3575 """``mq()``
3576 3576 Changesets managed by MQ.
3577 3577 """
3578 3578 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3579 3579 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3580 3580 return [r for r in subset if r in applied]
3581 3581
3582 3582 # tell hggettext to extract docstrings from these functions:
3583 3583 i18nfunctions = [revsetmq]
3584 3584
3585 3585 def extsetup(ui):
3586 3586 # Ensure mq wrappers are called first, regardless of extension load order by
3587 3587 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3588 3588 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3589 3589
3590 3590 extensions.wrapcommand(commands.table, 'import', mqimport)
3591 3591 extensions.wrapcommand(commands.table, 'summary', summary)
3592 3592
3593 3593 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3594 3594 entry[1].extend(mqopt)
3595 3595
3596 3596 nowrap = set(commands.norepo.split(" "))
3597 3597
3598 3598 def dotable(cmdtable):
3599 3599 for cmd in cmdtable.keys():
3600 3600 cmd = cmdutil.parsealiases(cmd)[0]
3601 3601 if cmd in nowrap:
3602 3602 continue
3603 3603 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3604 3604 entry[1].extend(mqopt)
3605 3605
3606 3606 dotable(commands.table)
3607 3607
3608 3608 for extname, extmodule in extensions.extensions():
3609 3609 if extmodule.__file__ != __file__:
3610 3610 dotable(getattr(extmodule, 'cmdtable', {}))
3611 3611
3612 3612 revset.symbols['mq'] = revsetmq
3613 3613
3614 3614 colortable = {'qguard.negative': 'red',
3615 3615 'qguard.positive': 'yellow',
3616 3616 'qguard.unguarded': 'green',
3617 3617 'qseries.applied': 'blue bold underline',
3618 3618 'qseries.guarded': 'black bold',
3619 3619 'qseries.missing': 'red bold',
3620 3620 'qseries.unapplied': 'black bold'}
3621 3621
3622 3622 commands.inferrepo += " qnew qrefresh qdiff qcommit"
@@ -1,172 +1,172 b''
1 1 # win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
2 2 #
3 3 # Copyright 2005, 2007-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''perform automatic newline conversion
9 9
10 10 Deprecation: The win32text extension requires each user to configure
11 11 the extension again and again for each clone since the configuration
12 12 is not copied when cloning.
13 13
14 14 We have therefore made the ``eol`` as an alternative. The ``eol``
15 15 uses a version controlled file for its configuration and each clone
16 16 will therefore use the right settings from the start.
17 17
18 18 To perform automatic newline conversion, use::
19 19
20 20 [extensions]
21 21 win32text =
22 22 [encode]
23 23 ** = cleverencode:
24 24 # or ** = macencode:
25 25
26 26 [decode]
27 27 ** = cleverdecode:
28 28 # or ** = macdecode:
29 29
30 30 If not doing conversion, to make sure you do not commit CRLF/CR by accident::
31 31
32 32 [hooks]
33 33 pretxncommit.crlf = python:hgext.win32text.forbidcrlf
34 34 # or pretxncommit.cr = python:hgext.win32text.forbidcr
35 35
36 36 To do the same check on a server to prevent CRLF/CR from being
37 37 pushed or pulled::
38 38
39 39 [hooks]
40 40 pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
41 41 # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
42 42 '''
43 43
44 44 from mercurial.i18n import _
45 45 from mercurial.node import short
46 46 from mercurial import util
47 47 import re
48 48
49 49 testedwith = 'internal'
50 50
51 51 # regexp for single LF without CR preceding.
52 52 re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
53 53
54 54 newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
55 55 filterstr = {'\r\n': 'clever', '\r': 'mac'}
56 56
57 57 def checknewline(s, newline, ui=None, repo=None, filename=None):
58 58 # warn if already has 'newline' in repository.
59 59 # it might cause unexpected eol conversion.
60 60 # see issue 302:
61 61 # http://mercurial.selenic.com/bts/issue302
62 62 if newline in s and ui and filename and repo:
63 63 ui.warn(_('WARNING: %s already has %s line endings\n'
64 64 'and does not need EOL conversion by the win32text plugin.\n'
65 65 'Before your next commit, please reconsider your '
66 66 'encode/decode settings in \nMercurial.ini or %s.\n') %
67 67 (filename, newlinestr[newline], repo.join('hgrc')))
68 68
69 69 def dumbdecode(s, cmd, **kwargs):
70 70 checknewline(s, '\r\n', **kwargs)
71 71 # replace single LF to CRLF
72 72 return re_single_lf.sub('\\1\r\n', s)
73 73
74 74 def dumbencode(s, cmd):
75 75 return s.replace('\r\n', '\n')
76 76
77 77 def macdumbdecode(s, cmd, **kwargs):
78 78 checknewline(s, '\r', **kwargs)
79 79 return s.replace('\n', '\r')
80 80
81 81 def macdumbencode(s, cmd):
82 82 return s.replace('\r', '\n')
83 83
84 84 def cleverdecode(s, cmd, **kwargs):
85 85 if not util.binary(s):
86 86 return dumbdecode(s, cmd, **kwargs)
87 87 return s
88 88
89 89 def cleverencode(s, cmd):
90 90 if not util.binary(s):
91 91 return dumbencode(s, cmd)
92 92 return s
93 93
94 94 def macdecode(s, cmd, **kwargs):
95 95 if not util.binary(s):
96 96 return macdumbdecode(s, cmd, **kwargs)
97 97 return s
98 98
99 99 def macencode(s, cmd):
100 100 if not util.binary(s):
101 101 return macdumbencode(s, cmd)
102 102 return s
103 103
104 104 _filters = {
105 105 'dumbdecode:': dumbdecode,
106 106 'dumbencode:': dumbencode,
107 107 'cleverdecode:': cleverdecode,
108 108 'cleverencode:': cleverencode,
109 109 'macdumbdecode:': macdumbdecode,
110 110 'macdumbencode:': macdumbencode,
111 111 'macdecode:': macdecode,
112 112 'macencode:': macencode,
113 113 }
114 114
115 115 def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
116 116 halt = False
117 117 seen = set()
118 118 # we try to walk changesets in reverse order from newest to
119 119 # oldest, so that if we see a file multiple times, we take the
120 120 # newest version as canonical. this prevents us from blocking a
121 121 # changegroup that contains an unacceptable commit followed later
122 122 # by a commit that fixes the problem.
123 123 tip = repo['tip']
124 for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
124 for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
125 125 c = repo[rev]
126 126 for f in c.files():
127 127 if f in seen or f not in tip or f not in c:
128 128 continue
129 129 seen.add(f)
130 130 data = c[f].data()
131 131 if not util.binary(data) and newline in data:
132 132 if not halt:
133 133 ui.warn(_('attempt to commit or push text file(s) '
134 134 'using %s line endings\n') %
135 135 newlinestr[newline])
136 136 ui.warn(_('in %s: %s\n') % (short(c.node()), f))
137 137 halt = True
138 138 if halt and hooktype == 'pretxnchangegroup':
139 139 crlf = newlinestr[newline].lower()
140 140 filter = filterstr[newline]
141 141 ui.warn(_('\nTo prevent this mistake in your local repository,\n'
142 142 'add to Mercurial.ini or .hg/hgrc:\n'
143 143 '\n'
144 144 '[hooks]\n'
145 145 'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
146 146 '\n'
147 147 'and also consider adding:\n'
148 148 '\n'
149 149 '[extensions]\n'
150 150 'win32text =\n'
151 151 '[encode]\n'
152 152 '** = %sencode:\n'
153 153 '[decode]\n'
154 154 '** = %sdecode:\n') % (crlf, crlf, filter, filter))
155 155 return halt
156 156
157 157 def forbidcrlf(ui, repo, hooktype, node, **kwargs):
158 158 return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
159 159
160 160 def forbidcr(ui, repo, hooktype, node, **kwargs):
161 161 return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
162 162
163 163 def reposetup(ui, repo):
164 164 if not repo.local():
165 165 return
166 166 for name, fn in _filters.iteritems():
167 167 repo.adddatafilter(name, fn)
168 168
169 169 def extsetup(ui):
170 170 if ui.configbool('win32text', 'warn', True):
171 171 ui.warn(_("win32text is deprecated: "
172 172 "http://mercurial.selenic.com/wiki/Win32TextExtension\n"))
@@ -1,105 +1,105 b''
1 1 # ignore.py - ignored file handling for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, match
10 10 import re
11 11
12 12 _commentre = None
13 13
14 14 def ignorepats(lines):
15 15 '''parse lines (iterable) of .hgignore text, returning a tuple of
16 16 (patterns, parse errors). These patterns should be given to compile()
17 17 to be validated and converted into a match function.'''
18 18 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
19 19 syntax = 'relre:'
20 20 patterns = []
21 21 warnings = []
22 22
23 23 for line in lines:
24 24 if "#" in line:
25 25 global _commentre
26 26 if not _commentre:
27 27 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
28 28 # remove comments prefixed by an even number of escapes
29 29 line = _commentre.sub(r'\1', line)
30 30 # fixup properly escaped comments that survived the above
31 31 line = line.replace("\\#", "#")
32 32 line = line.rstrip()
33 33 if not line:
34 34 continue
35 35
36 36 if line.startswith('syntax:'):
37 37 s = line[7:].strip()
38 38 try:
39 39 syntax = syntaxes[s]
40 40 except KeyError:
41 41 warnings.append(_("ignoring invalid syntax '%s'") % s)
42 42 continue
43 43 pat = syntax + line
44 44 for s, rels in syntaxes.iteritems():
45 45 if line.startswith(rels):
46 46 pat = line
47 47 break
48 48 elif line.startswith(s+':'):
49 pat = rels + line[len(s)+1:]
49 pat = rels + line[len(s) + 1:]
50 50 break
51 51 patterns.append(pat)
52 52
53 53 return patterns, warnings
54 54
55 55 def ignore(root, files, warn):
56 56 '''return matcher covering patterns in 'files'.
57 57
58 58 the files parsed for patterns include:
59 59 .hgignore in the repository root
60 60 any additional files specified in the [ui] section of ~/.hgrc
61 61
62 62 trailing white space is dropped.
63 63 the escape character is backslash.
64 64 comments start with #.
65 65 empty lines are skipped.
66 66
67 67 lines can be of the following formats:
68 68
69 69 syntax: regexp # defaults following lines to non-rooted regexps
70 70 syntax: glob # defaults following lines to non-rooted globs
71 71 re:pattern # non-rooted regular expression
72 72 glob:pattern # non-rooted glob
73 73 pattern # pattern of the current default type'''
74 74
75 75 pats = {}
76 76 for f in files:
77 77 try:
78 78 pats[f] = []
79 79 fp = open(f)
80 80 pats[f], warnings = ignorepats(fp)
81 81 fp.close()
82 82 for warning in warnings:
83 83 warn("%s: %s\n" % (f, warning))
84 84 except IOError, inst:
85 85 if f != files[0]:
86 86 warn(_("skipping unreadable ignore file '%s': %s\n") %
87 87 (f, inst.strerror))
88 88
89 89 allpats = []
90 90 for patlist in pats.values():
91 91 allpats.extend(patlist)
92 92 if not allpats:
93 93 return util.never
94 94
95 95 try:
96 96 ignorefunc = match.match(root, '', [], allpats)
97 97 except util.Abort:
98 98 # Re-raise an exception where the src is the right file
99 99 for f, patlist in pats.iteritems():
100 100 try:
101 101 match.match(root, '', [], patlist)
102 102 except util.Abort, inst:
103 103 raise util.Abort('%s: %s' % (f, inst[0]))
104 104
105 105 return ignorefunc
@@ -1,2680 +1,2680 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class repofilecache(filecache):
22 22 """All filecache usage on repo are done for logic that should be unfiltered
23 23 """
24 24
25 25 def __get__(self, repo, type=None):
26 26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 27 def __set__(self, repo, value):
28 28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 29 def __delete__(self, repo):
30 30 return super(repofilecache, self).__delete__(repo.unfiltered())
31 31
32 32 class storecache(repofilecache):
33 33 """filecache for files in the store"""
34 34 def join(self, obj, fname):
35 35 return obj.sjoin(fname)
36 36
37 37 class unfilteredpropertycache(propertycache):
38 38 """propertycache that apply to unfiltered repo only"""
39 39
40 40 def __get__(self, repo, type=None):
41 41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 42
43 43 class filteredpropertycache(propertycache):
44 44 """propertycache that must take filtering in account"""
45 45
46 46 def cachevalue(self, obj, value):
47 47 object.__setattr__(obj, self.name, value)
48 48
49 49
50 50 def hasunfilteredcache(repo, name):
51 51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 52 return name in vars(repo.unfiltered())
53 53
54 54 def unfilteredmethod(orig):
55 55 """decorate method that always need to be run on unfiltered version"""
56 56 def wrapper(repo, *args, **kwargs):
57 57 return orig(repo.unfiltered(), *args, **kwargs)
58 58 return wrapper
59 59
60 60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 62
63 63 class localpeer(peer.peerrepository):
64 64 '''peer for a local repo; reflects only the most recent API'''
65 65
66 66 def __init__(self, repo, caps=MODERNCAPS):
67 67 peer.peerrepository.__init__(self)
68 68 self._repo = repo
69 69 self.ui = repo.ui
70 70 self._caps = repo._restrictcapabilities(caps)
71 71 self.requirements = repo.requirements
72 72 self.supportedformats = repo.supportedformats
73 73
74 74 def close(self):
75 75 self._repo.close()
76 76
77 77 def _capabilities(self):
78 78 return self._caps
79 79
80 80 def local(self):
81 81 return self._repo
82 82
83 83 def canpush(self):
84 84 return True
85 85
86 86 def url(self):
87 87 return self._repo.url()
88 88
89 89 def lookup(self, key):
90 90 return self._repo.lookup(key)
91 91
92 92 def branchmap(self):
93 93 return discovery.visiblebranchmap(self._repo)
94 94
95 95 def heads(self):
96 96 return discovery.visibleheads(self._repo)
97 97
98 98 def known(self, nodes):
99 99 return self._repo.known(nodes)
100 100
101 101 def getbundle(self, source, heads=None, common=None):
102 102 return self._repo.getbundle(source, heads=heads, common=common)
103 103
104 104 # TODO We might want to move the next two calls into legacypeer and add
105 105 # unbundle instead.
106 106
107 107 def lock(self):
108 108 return self._repo.lock()
109 109
110 110 def addchangegroup(self, cg, source, url):
111 111 return self._repo.addchangegroup(cg, source, url)
112 112
113 113 def pushkey(self, namespace, key, old, new):
114 114 return self._repo.pushkey(namespace, key, old, new)
115 115
116 116 def listkeys(self, namespace):
117 117 return self._repo.listkeys(namespace)
118 118
119 119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 120 '''used to test argument passing over the wire'''
121 121 return "%s %s %s %s %s" % (one, two, three, four, five)
122 122
123 123 class locallegacypeer(localpeer):
124 124 '''peer extension which implements legacy methods too; used for tests with
125 125 restricted capabilities'''
126 126
127 127 def __init__(self, repo):
128 128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 129
130 130 def branches(self, nodes):
131 131 return self._repo.branches(nodes)
132 132
133 133 def between(self, pairs):
134 134 return self._repo.between(pairs)
135 135
136 136 def changegroup(self, basenodes, source):
137 137 return self._repo.changegroup(basenodes, source)
138 138
139 139 def changegroupsubset(self, bases, heads, source):
140 140 return self._repo.changegroupsubset(bases, heads, source)
141 141
142 142 class localrepository(object):
143 143
144 144 supportedformats = set(('revlogv1', 'generaldelta'))
145 145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 146 'dotencode'))
147 147 openerreqs = set(('revlogv1', 'generaldelta'))
148 148 requirements = ['revlogv1']
149 149
150 150 def _baserequirements(self, create):
151 151 return self.requirements[:]
152 152
153 153 def __init__(self, baseui, path=None, create=False):
154 154 self.wvfs = scmutil.vfs(path, expand=True)
155 155 self.wopener = self.wvfs
156 156 self.root = self.wvfs.base
157 157 self.path = self.wvfs.join(".hg")
158 158 self.origroot = path
159 159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 160 self.vfs = scmutil.vfs(self.path)
161 161 self.opener = self.vfs
162 162 self.baseui = baseui
163 163 self.ui = baseui.copy()
164 164 # A list of callback to shape the phase if no data were found.
165 165 # Callback are in the form: func(repo, roots) --> processed root.
166 166 # This list it to be filled by extension during repo setup
167 167 self._phasedefaults = []
168 168 try:
169 169 self.ui.readconfig(self.join("hgrc"), self.root)
170 170 extensions.loadall(self.ui)
171 171 except IOError:
172 172 pass
173 173
174 174 if not self.vfs.isdir():
175 175 if create:
176 176 if not self.wvfs.exists():
177 177 self.wvfs.makedirs()
178 178 self.vfs.makedir(notindexed=True)
179 179 requirements = self._baserequirements(create)
180 180 if self.ui.configbool('format', 'usestore', True):
181 181 self.vfs.mkdir("store")
182 182 requirements.append("store")
183 183 if self.ui.configbool('format', 'usefncache', True):
184 184 requirements.append("fncache")
185 185 if self.ui.configbool('format', 'dotencode', True):
186 186 requirements.append('dotencode')
187 187 # create an invalid changelog
188 188 self.vfs.append(
189 189 "00changelog.i",
190 190 '\0\0\0\2' # represents revlogv2
191 191 ' dummy changelog to prevent using the old repo layout'
192 192 )
193 193 if self.ui.configbool('format', 'generaldelta', False):
194 194 requirements.append("generaldelta")
195 195 requirements = set(requirements)
196 196 else:
197 197 raise error.RepoError(_("repository %s not found") % path)
198 198 elif create:
199 199 raise error.RepoError(_("repository %s already exists") % path)
200 200 else:
201 201 try:
202 202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 203 except IOError, inst:
204 204 if inst.errno != errno.ENOENT:
205 205 raise
206 206 requirements = set()
207 207
208 208 self.sharedpath = self.path
209 209 try:
210 210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 211 if not os.path.exists(s):
212 212 raise error.RepoError(
213 213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 214 self.sharedpath = s
215 215 except IOError, inst:
216 216 if inst.errno != errno.ENOENT:
217 217 raise
218 218
219 219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 220 self.spath = self.store.path
221 221 self.svfs = self.store.vfs
222 222 self.sopener = self.svfs
223 223 self.sjoin = self.store.join
224 224 self.vfs.createmode = self.store.createmode
225 225 self._applyrequirements(requirements)
226 226 if create:
227 227 self._writerequirements()
228 228
229 229
230 230 self._branchcache = None
231 231 self._branchcachetip = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 def close(self):
243 243 pass
244 244
245 245 def _restrictcapabilities(self, caps):
246 246 return caps
247 247
248 248 def _applyrequirements(self, requirements):
249 249 self.requirements = requirements
250 250 self.sopener.options = dict((r, 1) for r in requirements
251 251 if r in self.openerreqs)
252 252
253 253 def _writerequirements(self):
254 254 reqfile = self.opener("requires", "w")
255 255 for r in self.requirements:
256 256 reqfile.write("%s\n" % r)
257 257 reqfile.close()
258 258
259 259 def _checknested(self, path):
260 260 """Determine if path is a legal nested repository."""
261 261 if not path.startswith(self.root):
262 262 return False
263 263 subpath = path[len(self.root) + 1:]
264 264 normsubpath = util.pconvert(subpath)
265 265
266 266 # XXX: Checking against the current working copy is wrong in
267 267 # the sense that it can reject things like
268 268 #
269 269 # $ hg cat -r 10 sub/x.txt
270 270 #
271 271 # if sub/ is no longer a subrepository in the working copy
272 272 # parent revision.
273 273 #
274 274 # However, it can of course also allow things that would have
275 275 # been rejected before, such as the above cat command if sub/
276 276 # is a subrepository now, but was a normal directory before.
277 277 # The old path auditor would have rejected by mistake since it
278 278 # panics when it sees sub/.hg/.
279 279 #
280 280 # All in all, checking against the working copy seems sensible
281 281 # since we want to prevent access to nested repositories on
282 282 # the filesystem *now*.
283 283 ctx = self[None]
284 284 parts = util.splitpath(subpath)
285 285 while parts:
286 286 prefix = '/'.join(parts)
287 287 if prefix in ctx.substate:
288 288 if prefix == normsubpath:
289 289 return True
290 290 else:
291 291 sub = ctx.sub(prefix)
292 292 return sub.checknested(subpath[len(prefix) + 1:])
293 293 else:
294 294 parts.pop()
295 295 return False
296 296
297 297 def peer(self):
298 298 return localpeer(self) # not cached to avoid reference cycle
299 299
300 300 def unfiltered(self):
301 301 """Return unfiltered version of the repository
302 302
303 303 Intended to be ovewritten by filtered repo."""
304 304 return self
305 305
306 306 @repofilecache('bookmarks')
307 307 def _bookmarks(self):
308 308 return bookmarks.bmstore(self)
309 309
310 310 @repofilecache('bookmarks.current')
311 311 def _bookmarkcurrent(self):
312 312 return bookmarks.readcurrent(self)
313 313
314 314 def bookmarkheads(self, bookmark):
315 315 name = bookmark.split('@', 1)[0]
316 316 heads = []
317 317 for mark, n in self._bookmarks.iteritems():
318 318 if mark.split('@', 1)[0] == name:
319 319 heads.append(n)
320 320 return heads
321 321
322 322 @storecache('phaseroots')
323 323 def _phasecache(self):
324 324 return phases.phasecache(self, self._phasedefaults)
325 325
326 326 @storecache('obsstore')
327 327 def obsstore(self):
328 328 store = obsolete.obsstore(self.sopener)
329 329 if store and not obsolete._enabled:
330 330 # message is rare enough to not be translated
331 331 msg = 'obsolete feature not enabled but %i markers found!\n'
332 332 self.ui.warn(msg % len(list(store)))
333 333 return store
334 334
335 335 @unfilteredpropertycache
336 336 def hiddenrevs(self):
337 337 """hiddenrevs: revs that should be hidden by command and tools
338 338
339 339 This set is carried on the repo to ease initialization and lazy
340 340 loading; it'll probably move back to changelog for efficiency and
341 341 consistency reasons.
342 342
343 343 Note that the hiddenrevs will needs invalidations when
344 344 - a new changesets is added (possible unstable above extinct)
345 345 - a new obsolete marker is added (possible new extinct changeset)
346 346
347 347 hidden changesets cannot have non-hidden descendants
348 348 """
349 349 hidden = set()
350 350 if self.obsstore:
351 351 ### hide extinct changeset that are not accessible by any mean
352 352 hiddenquery = 'extinct() - ::(. + bookmark())'
353 353 hidden.update(self.revs(hiddenquery))
354 354 return hidden
355 355
356 356 @storecache('00changelog.i')
357 357 def changelog(self):
358 358 c = changelog.changelog(self.sopener)
359 359 if 'HG_PENDING' in os.environ:
360 360 p = os.environ['HG_PENDING']
361 361 if p.startswith(self.root):
362 362 c.readpending('00changelog.i.a')
363 363 return c
364 364
365 365 @storecache('00manifest.i')
366 366 def manifest(self):
367 367 return manifest.manifest(self.sopener)
368 368
369 369 @repofilecache('dirstate')
370 370 def dirstate(self):
371 371 warned = [0]
372 372 def validate(node):
373 373 try:
374 374 self.changelog.rev(node)
375 375 return node
376 376 except error.LookupError:
377 377 if not warned[0]:
378 378 warned[0] = True
379 379 self.ui.warn(_("warning: ignoring unknown"
380 380 " working parent %s!\n") % short(node))
381 381 return nullid
382 382
383 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 384
385 385 def __getitem__(self, changeid):
386 386 if changeid is None:
387 387 return context.workingctx(self)
388 388 return context.changectx(self, changeid)
389 389
390 390 def __contains__(self, changeid):
391 391 try:
392 392 return bool(self.lookup(changeid))
393 393 except error.RepoLookupError:
394 394 return False
395 395
396 396 def __nonzero__(self):
397 397 return True
398 398
399 399 def __len__(self):
400 400 return len(self.changelog)
401 401
402 402 def __iter__(self):
403 403 return iter(self.changelog)
404 404
405 405 def revs(self, expr, *args):
406 406 '''Return a list of revisions matching the given revset'''
407 407 expr = revset.formatspec(expr, *args)
408 408 m = revset.match(None, expr)
409 409 return [r for r in m(self, list(self))]
410 410
411 411 def set(self, expr, *args):
412 412 '''
413 413 Yield a context for each matching revision, after doing arg
414 414 replacement via revset.formatspec
415 415 '''
416 416 for r in self.revs(expr, *args):
417 417 yield self[r]
418 418
419 419 def url(self):
420 420 return 'file:' + self.root
421 421
422 422 def hook(self, name, throw=False, **args):
423 423 return hook.hook(self.ui, self, name, throw, **args)
424 424
425 425 @unfilteredmethod
426 426 def _tag(self, names, node, message, local, user, date, extra={}):
427 427 if isinstance(names, str):
428 428 names = (names,)
429 429
430 430 branches = self.branchmap()
431 431 for name in names:
432 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 433 local=local)
434 434 if name in branches:
435 435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 436 " branch name\n") % name)
437 437
438 438 def writetags(fp, names, munge, prevtags):
439 439 fp.seek(0, 2)
440 440 if prevtags and prevtags[-1] != '\n':
441 441 fp.write('\n')
442 442 for name in names:
443 443 m = munge and munge(name) or name
444 444 if (self._tagscache.tagtypes and
445 445 name in self._tagscache.tagtypes):
446 446 old = self.tags().get(name, nullid)
447 447 fp.write('%s %s\n' % (hex(old), m))
448 448 fp.write('%s %s\n' % (hex(node), m))
449 449 fp.close()
450 450
451 451 prevtags = ''
452 452 if local:
453 453 try:
454 454 fp = self.opener('localtags', 'r+')
455 455 except IOError:
456 456 fp = self.opener('localtags', 'a')
457 457 else:
458 458 prevtags = fp.read()
459 459
460 460 # local tags are stored in the current charset
461 461 writetags(fp, names, None, prevtags)
462 462 for name in names:
463 463 self.hook('tag', node=hex(node), tag=name, local=local)
464 464 return
465 465
466 466 try:
467 467 fp = self.wfile('.hgtags', 'rb+')
468 468 except IOError, e:
469 469 if e.errno != errno.ENOENT:
470 470 raise
471 471 fp = self.wfile('.hgtags', 'ab')
472 472 else:
473 473 prevtags = fp.read()
474 474
475 475 # committed tags are stored in UTF-8
476 476 writetags(fp, names, encoding.fromlocal, prevtags)
477 477
478 478 fp.close()
479 479
480 480 self.invalidatecaches()
481 481
482 482 if '.hgtags' not in self.dirstate:
483 483 self[None].add(['.hgtags'])
484 484
485 485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 487
488 488 for name in names:
489 489 self.hook('tag', node=hex(node), tag=name, local=local)
490 490
491 491 return tagnode
492 492
493 493 def tag(self, names, node, message, local, user, date):
494 494 '''tag a revision with one or more symbolic names.
495 495
496 496 names is a list of strings or, when adding a single tag, names may be a
497 497 string.
498 498
499 499 if local is True, the tags are stored in a per-repository file.
500 500 otherwise, they are stored in the .hgtags file, and a new
501 501 changeset is committed with the change.
502 502
503 503 keyword arguments:
504 504
505 505 local: whether to store tags in non-version-controlled file
506 506 (default False)
507 507
508 508 message: commit message to use if committing
509 509
510 510 user: name of user to use if committing
511 511
512 512 date: date tuple to use if committing'''
513 513
514 514 if not local:
515 515 for x in self.status()[:5]:
516 516 if '.hgtags' in x:
517 517 raise util.Abort(_('working copy of .hgtags is changed '
518 518 '(please commit .hgtags manually)'))
519 519
520 520 self.tags() # instantiate the cache
521 521 self._tag(names, node, message, local, user, date)
522 522
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled in lazily by tagslist() and
                # nodetags() on first use.
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
545 545
546 546 def tags(self):
547 547 '''return a mapping of tag to node'''
548 548 t = {}
549 549 if self.changelog.filteredrevs:
550 550 tags, tt = self._findtags()
551 551 else:
552 552 tags = self._tagscache.tags
553 553 for k, v in tags.iteritems():
554 554 try:
555 555 # ignore tags to unknown nodes
556 556 self.changelog.rev(v)
557 557 t[k] = v
558 558 except (error.LookupError, ValueError):
559 559 pass
560 560 return t
561 561
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # skip tags that point to the null node
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # the 'tip' tag is implicit and always points to the changelog tip
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
594 594
595 595 def tagtype(self, tagname):
596 596 '''
597 597 return the type of the given tag. result can be:
598 598
599 599 'local' : a local tag
600 600 'global' : a global tag
601 601 None : tag does not exist
602 602 '''
603 603
604 604 return self._tagscache.tagtypes.get(tagname)
605 605
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # built lazily: collect (rev, tag, node) triples so sorting
            # by revision is a plain tuple sort, then drop the rev again
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
616 616
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the node -> [tags] reverse mapping once, on demand
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            # sort each tag list for stable output
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
627 627
628 628 def nodebookmarks(self, node):
629 629 marks = []
630 630 for bookmark, n in self._bookmarks.iteritems():
631 631 if n == node:
632 632 marks.append(bookmark)
633 633 return sorted(marks)
634 634
    def _branchtags(self, partial, lrev):
        '''bring the branch head cache `partial` (valid up to revision
        lrev) up to date with the current tip and persist it; returns
        partial.'''
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            # feed every changeset after lrev into the cache update
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
644 644
    @unfilteredmethod # Until we get a smarter cache management
    def updatebranchcache(self):
        '''refresh the in-memory branch cache (self._branchcache) so it
        is valid for the current changelog tip'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already up to date, nothing to do
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. first use, or the old tip
            # was stripped): start from whatever was saved on disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial
662 662
663 663 def branchmap(self):
664 664 '''returns a dictionary {branch: [branchheads]}'''
665 665 if self.changelog.filteredrevs:
666 666 # some changeset are excluded we can't use the cache
667 667 branchmap = {}
668 668 self._updatebranchcache(branchmap, (self[r] for r in self))
669 669 return branchmap
670 670 else:
671 671 self.updatebranchcache()
672 672 return self._branchcache
673 673
674 674
675 675 def _branchtip(self, heads):
676 676 '''return the tipmost branch head in heads'''
677 677 tip = heads[-1]
678 678 for h in reversed(heads):
679 679 if not self[h].closesbranch():
680 680 tip = h
681 681 break
682 682 return tip
683 683
684 684 def branchtip(self, branch):
685 685 '''return the tip node for a given branch'''
686 686 if branch not in self.branchmap():
687 687 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
688 688 return self._branchtip(self.branchmap()[branch])
689 689
690 690 def branchtags(self):
691 691 '''return a dict where branch names map to the tipmost head of
692 692 the branch, open heads come before closed'''
693 693 bt = {}
694 694 for bn, heads in self.branchmap().iteritems():
695 695 bt[bn] = self._branchtip(heads)
696 696 return bt
697 697
698 698 @unfilteredmethod # Until we get a smarter cache management
699 699 def _readbranchcache(self):
700 700 partial = {}
701 701 try:
702 702 f = self.opener("cache/branchheads")
703 703 lines = f.read().split('\n')
704 704 f.close()
705 705 except (IOError, OSError):
706 706 return {}, nullid, nullrev
707 707
708 708 try:
709 709 last, lrev = lines.pop(0).split(" ", 1)
710 710 last, lrev = bin(last), int(lrev)
711 711 if lrev >= len(self) or self[lrev].node() != last:
712 712 # invalidate the cache
713 713 raise ValueError('invalidating branch cache (tip differs)')
714 714 for l in lines:
715 715 if not l:
716 716 continue
717 717 node, label = l.split(" ", 1)
718 718 label = encoding.tolocal(label.strip())
719 719 if not node in self:
720 720 raise ValueError('invalidating branch cache because node '+
721 721 '%s does not exist' % node)
722 722 partial.setdefault(label, []).append(bin(node))
723 723 except KeyboardInterrupt:
724 724 raise
725 725 except Exception, inst:
726 726 if self.ui.debugflag:
727 727 self.ui.warn(str(inst), '\n')
728 728 partial, last, lrev = {}, nullid, nullrev
729 729 return partial, last, lrev
730 730
    @unfilteredmethod # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        '''write the branch head cache to .hg/cache/branchheads: one
        "<tip hex> <tip rev>" header line, then one "<node hex> <label>"
        line per head.  Write errors are deliberately ignored - this is
        only a cache.'''
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
742 742
    @unfilteredmethod # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            # NOTE(review): if every node in newnodes was stripped,
            # newheadrevs is empty and min() below raises ValueError when
            # bheadrevs is non-empty; presumably callers guarantee at least
            # one surviving node - confirm.
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
803 803
804 804 def lookup(self, key):
805 805 return self[key].node()
806 806
807 807 def lookupbranch(self, key, remote=None):
808 808 repo = remote or self
809 809 if key in repo.branchmap():
810 810 return key
811 811
812 812 repo = (remote and remote.local()) and remote or self
813 813 return repo[key].branch()
814 814
815 815 def known(self, nodes):
816 816 nm = self.changelog.nodemap
817 817 pc = self._phasecache
818 818 result = []
819 819 for n in nodes:
820 820 r = nm.get(n)
821 821 resp = not (r is None or pc.phase(self, r) >= phases.secret)
822 822 result.append(resp)
823 823 return result
824 824
825 825 def local(self):
826 826 return self
827 827
828 828 def cancopy(self):
829 829 return self.local() # so statichttprepo's override of local() works
830 830
831 831 def join(self, f):
832 832 return os.path.join(self.path, f)
833 833
834 834 def wjoin(self, f):
835 835 return os.path.join(self.root, f)
836 836
837 837 def file(self, f):
838 838 if f[0] == '/':
839 839 f = f[1:]
840 840 return filelog.filelog(self.sopener, f)
841 841
842 842 def changectx(self, changeid):
843 843 return self[changeid]
844 844
845 845 def parents(self, changeid=None):
846 846 '''get list of changectxs for parents of changeid'''
847 847 return self[changeid].parents()
848 848
    def setparents(self, p1, p2=nullid):
        '''set the working directory parents to p1 (and optionally p2),
        carrying dirstate copy records over where possible'''
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
859 859
860 860 def filectx(self, path, changeid=None, fileid=None):
861 861 """changeid can be a changeset revision, node, or tag.
862 862 fileid can be a file revision or node."""
863 863 return context.filectx(self, path, changeid, fileid)
864 864
865 865 def getcwd(self):
866 866 return self.dirstate.getcwd()
867 867
868 868 def pathto(self, f, cwd=None):
869 869 return self.dirstate.pathto(f, cwd)
870 870
871 871 def wfile(self, f, mode='r'):
872 872 return self.wopener(f, mode)
873 873
874 874 def _link(self, f):
875 875 return os.path.islink(self.wjoin(f))
876 876
    def _loadfilter(self, filter):
        '''load, cache and return the list of (matcher, fn, params)
        triples configured in config section `filter` (e.g. 'encode'
        or 'decode')'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # a command of '!' means: skip this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # in-process filter registered via adddatafilter()
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
900 900
901 901 def _filter(self, filterpats, filename, data):
902 902 for mf, fn, cmd in filterpats:
903 903 if mf(filename):
904 904 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
905 905 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
906 906 break
907 907
908 908 return data
909 909
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached property: filters applied when reading file content
        # from the working directory (see wread)
        return self._loadfilter('encode')
913 913
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached property: filters applied when writing file content
        # to the working directory (see wwrite/wwritedata)
        return self._loadfilter('decode')
917 917
918 918 def adddatafilter(self, name, filter):
919 919 self._datafilters[name] = filter
920 920
921 921 def wread(self, filename):
922 922 if self._link(filename):
923 923 data = os.readlink(self.wjoin(filename))
924 924 else:
925 925 data = self.wopener.read(filename)
926 926 return self._filter(self._encodefilterpats, filename, data)
927 927
928 928 def wwrite(self, filename, data, flags):
929 929 data = self._filter(self._decodefilterpats, filename, data)
930 930 if 'l' in flags:
931 931 self.wopener.symlink(data, filename)
932 932 else:
933 933 self.wopener.write(filename, data)
934 934 if 'x' in flags:
935 935 util.setflags(self.wjoin(filename), False, True)
936 936
937 937 def wwritedata(self, filename, data):
938 938 return self._filter(self._decodefilterpats, filename, data)
939 939
    def transaction(self, desc):
        '''open a new transaction, or nest into the one already running.

        Raises RepoError when an abandoned journal from an interrupted
        transaction is found.  Returns the transaction object; only a
        weak reference to it is kept internally.'''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot current state so the transaction can be rolled back
        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
959 959
960 960 def _journalfiles(self):
961 961 return (self.sjoin('journal'), self.join('journal.dirstate'),
962 962 self.join('journal.branch'), self.join('journal.desc'),
963 963 self.join('journal.bookmarks'),
964 964 self.sjoin('journal.phaseroots'))
965 965
966 966 def undofiles(self):
967 967 return [undoname(x) for x in self._journalfiles()]
968 968
    def _writejournal(self, desc):
        '''snapshot dirstate, branch, length/description, bookmarks and
        phase roots so an in-progress transaction can be rolled back'''
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
980 980
    def recover(self):
        '''roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.'''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
995 995
    def rollback(self, dryrun=False, force=False):
        '''undo the last completed transaction, if undo data exists.

        Returns 1 when there is nothing to roll back, otherwise the
        result of _rollback (0 on success).'''
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
1008 1008
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        '''implementation of rollback(): undo the last transaction.

        Returns 0 on success (and on dry run).  Unless force is set,
        refuses to roll back a commit when the working directory is no
        longer checked out on it, since that could lose data.'''
        ui = self.ui
        try:
            # undo.desc records "<old repo length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # the working directory parents were rolled away: restore
            # the dirstate and branch saved before the transaction
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1078 1078
    def invalidatecaches(self):
        '''drop the in-memory tag, branch and obsolescence caches'''

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)
1088 1088
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # the cached attribute was never materialized
                    pass
            delattr(self.unfiltered(), 'dirstate')
1105 1105
    def invalidate(self):
        '''drop all cached properties (except the dirstate, which is
        handled by invalidatedirstate) so they are re-read on access'''
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                # the cached attribute may not be materialized yet
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
1118 1118
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the lock file `lockname` and return the lock object.

        When the lock is held elsewhere: with wait, retry for up to
        ui.timeout seconds; without it, let LockHeld propagate.
        releasefn/acquirefn are release and post-acquire callbacks;
        desc is used in user-facing messages.'''
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
1133 1133
1134 1134 def _afterlock(self, callback):
1135 1135 """add a callback to the current repository lock.
1136 1136
1137 1137 The callback will be executed on lock release."""
1138 1138 l = self._lockref and self._lockref()
1139 1139 if l:
1140 1140 l.postrelease.append(callback)
1141 1141 else:
1142 1142 callback()
1143 1143
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # lock already held: just bump its reference count
            l.lock()
            return l

        def unlock():
            # flush pending store and phase data before release
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1166 1166
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # lock already held: just bump its reference count
            l.lock()
            return l

        def unlock():
            # write the dirstate back before the lock goes away
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1187 1187
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """commit an individual file as part of a larger transaction

        Records renames as copy metadata, normalizes the file parents,
        appends fname to changelist when content (or, on a merge, just
        flags) changed, and returns the file node to store in the
        manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1267 1267
1268 1268 @unfilteredmethod
1269 1269 def commit(self, text="", user=None, date=None, match=None, force=False,
1270 1270 editor=False, extra={}):
1271 1271 """Add a new revision to current repository.
1272 1272
1273 1273 Revision information is gathered from the working directory,
1274 1274 match can be used to filter the committed files. If editor is
1275 1275 supplied, it is called to get a commit message.
1276 1276 """
1277 1277
1278 1278 def fail(f, msg):
1279 1279 raise util.Abort('%s: %s' % (f, msg))
1280 1280
1281 1281 if not match:
1282 1282 match = matchmod.always(self.root, '')
1283 1283
1284 1284 if not force:
1285 1285 vdirs = []
1286 1286 match.dir = vdirs.append
1287 1287 match.bad = fail
1288 1288
1289 1289 wlock = self.wlock()
1290 1290 try:
1291 1291 wctx = self[None]
1292 1292 merge = len(wctx.parents()) > 1
1293 1293
1294 1294 if (not force and merge and match and
1295 1295 (match.files() or match.anypats())):
1296 1296 raise util.Abort(_('cannot partially commit a merge '
1297 1297 '(do not specify files or patterns)'))
1298 1298
1299 1299 changes = self.status(match=match, clean=force)
1300 1300 if force:
1301 1301 changes[0].extend(changes[6]) # mq may commit unchanged files
1302 1302
1303 1303 # check subrepos
1304 1304 subs = []
1305 1305 commitsubs = set()
1306 1306 newstate = wctx.substate.copy()
1307 1307 # only manage subrepos and .hgsubstate if .hgsub is present
1308 1308 if '.hgsub' in wctx:
1309 1309 # we'll decide whether to track this ourselves, thanks
1310 1310 if '.hgsubstate' in changes[0]:
1311 1311 changes[0].remove('.hgsubstate')
1312 1312 if '.hgsubstate' in changes[2]:
1313 1313 changes[2].remove('.hgsubstate')
1314 1314
1315 1315 # compare current state to last committed state
1316 1316 # build new substate based on last committed state
1317 1317 oldstate = wctx.p1().substate
1318 1318 for s in sorted(newstate.keys()):
1319 1319 if not match(s):
1320 1320 # ignore working copy, use old state if present
1321 1321 if s in oldstate:
1322 1322 newstate[s] = oldstate[s]
1323 1323 continue
1324 1324 if not force:
1325 1325 raise util.Abort(
1326 1326 _("commit with new subrepo %s excluded") % s)
1327 1327 if wctx.sub(s).dirty(True):
1328 1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1329 1329 raise util.Abort(
1330 1330 _("uncommitted changes in subrepo %s") % s,
1331 1331 hint=_("use --subrepos for recursive commit"))
1332 1332 subs.append(s)
1333 1333 commitsubs.add(s)
1334 1334 else:
1335 1335 bs = wctx.sub(s).basestate()
1336 1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1337 1337 if oldstate.get(s, (None, None, None))[1] != bs:
1338 1338 subs.append(s)
1339 1339
1340 1340 # check for removed subrepos
1341 1341 for p in wctx.parents():
1342 1342 r = [s for s in p.substate if s not in newstate]
1343 1343 subs += [s for s in r if match(s)]
1344 1344 if subs:
1345 1345 if (not match('.hgsub') and
1346 1346 '.hgsub' in (wctx.modified() + wctx.added())):
1347 1347 raise util.Abort(
1348 1348 _("can't commit subrepos without .hgsub"))
1349 1349 changes[0].insert(0, '.hgsubstate')
1350 1350
1351 1351 elif '.hgsub' in changes[2]:
1352 1352 # clean up .hgsubstate when .hgsub is removed
1353 1353 if ('.hgsubstate' in wctx and
1354 1354 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1355 1355 changes[2].insert(0, '.hgsubstate')
1356 1356
1357 1357 # make sure all explicit patterns are matched
1358 1358 if not force and match.files():
1359 1359 matched = set(changes[0] + changes[1] + changes[2])
1360 1360
1361 1361 for f in match.files():
1362 1362 f = self.dirstate.normalize(f)
1363 1363 if f == '.' or f in matched or f in wctx.substate:
1364 1364 continue
1365 1365 if f in changes[3]: # missing
1366 1366 fail(f, _('file not found!'))
1367 1367 if f in vdirs: # visited directory
1368 1368 d = f + '/'
1369 1369 for mf in matched:
1370 1370 if mf.startswith(d):
1371 1371 break
1372 1372 else:
1373 1373 fail(f, _("no match under directory!"))
1374 1374 elif f not in self.dirstate:
1375 1375 fail(f, _("file not tracked!"))
1376 1376
1377 1377 if (not force and not extra.get("close") and not merge
1378 1378 and not (changes[0] or changes[1] or changes[2])
1379 1379 and wctx.branch() == wctx.p1().branch()):
1380 1380 return None
1381 1381
1382 1382 if merge and changes[3]:
1383 1383 raise util.Abort(_("cannot commit merge with missing files"))
1384 1384
1385 1385 ms = mergemod.mergestate(self)
1386 1386 for f in changes[0]:
1387 1387 if f in ms and ms[f] == 'u':
1388 1388 raise util.Abort(_("unresolved merge conflicts "
1389 1389 "(see hg help resolve)"))
1390 1390
1391 1391 cctx = context.workingctx(self, text, user, date, extra, changes)
1392 1392 if editor:
1393 1393 cctx._text = editor(self, cctx, subs)
1394 1394 edited = (text != cctx._text)
1395 1395
1396 1396 # commit subs and write new state
1397 1397 if subs:
1398 1398 for s in sorted(commitsubs):
1399 1399 sub = wctx.sub(s)
1400 1400 self.ui.status(_('committing subrepository %s\n') %
1401 1401 subrepo.subrelpath(sub))
1402 1402 sr = sub.commit(cctx._text, user, date)
1403 1403 newstate[s] = (newstate[s][0], sr)
1404 1404 subrepo.writestate(self, newstate)
1405 1405
1406 1406 # Save commit message in case this transaction gets rolled back
1407 1407 # (e.g. by a pretxncommit hook). Leave the content alone on
1408 1408 # the assumption that the user will use the same editor again.
1409 1409 msgfn = self.savecommitmessage(cctx._text)
1410 1410
1411 1411 p1, p2 = self.dirstate.parents()
1412 1412 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1413 1413 try:
1414 1414 self.hook("precommit", throw=True, parent1=hookp1,
1415 1415 parent2=hookp2)
1416 1416 ret = self.commitctx(cctx, True)
1417 1417 except: # re-raises
1418 1418 if edited:
1419 1419 self.ui.write(
1420 1420 _('note: commit message saved in %s\n') % msgfn)
1421 1421 raise
1422 1422
1423 1423 # update bookmarks, dirstate and mergestate
1424 1424 bookmarks.update(self, [p1, p2], ret)
1425 1425 for f in changes[0] + changes[1]:
1426 1426 self.dirstate.normal(f)
1427 1427 for f in changes[2]:
1428 1428 self.dirstate.drop(f)
1429 1429 self.dirstate.setparents(ret)
1430 1430 ms.reset()
1431 1431 finally:
1432 1432 wlock.release()
1433 1433
1434 1434 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1435 1435 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1436 1436 self._afterlock(commithook)
1437 1437 return ret
1438 1438
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.  When ``error`` is true,
        an IOError while committing a file is fatal even for a plain
        missing file (ENOENT); otherwise a missing file is treated as
        removed.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # use a weak proxy so revlogs appended through it don't keep
            # the transaction alive after release
            trp = weakref.proxy(tr)

            if ctx.files():
                # m1 is mutated below into the new manifest; m2 (second
                # parent) is only consulted
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a vanished file (ENOENT) becomes a removal unless
                        # the caller asked for strict error handling
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                # only report removals that actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazily materialize pending data only if a hook wants it
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1520 1520
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was uptodate before nodes were removed
        and you also know the set of candidate new heads that may have resulted
        from the destruction, you can set newheadnodes. This will enable the
        code to update the branchheads cache, rather than having future code
        decide it's invalid and regenerating it from scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it, Otherwise, since nodes were destroyed, the cache is stale and this
        # will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            # skip candidate heads that were themselves destroyed
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
1558 1558
1559 1559 def walk(self, match, node=None):
1560 1560 '''
1561 1561 walk recursively through the directory tree or a given
1562 1562 changeset, finding all files matched by the match
1563 1563 function
1564 1564 '''
1565 1565 return self[node].walk(match)
1566 1566
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag argument is true.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files accepted by 'match'
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            # ('cmp' holds files the dirstate could not classify cheaply)
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # reclassify every file in mf2 against mf1; anything left in
            # mf1 afterwards was removed
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # append "subpath/file" entries from each subrepo's own status
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1719 1719
1720 1720 def heads(self, start=None):
1721 1721 heads = self.changelog.heads(start)
1722 1722 # sort the output in rev descending order
1723 1723 return sorted(heads, key=self.changelog.rev, reverse=True)
1724 1724
1725 1725 def branchheads(self, branch=None, start=None, closed=False):
1726 1726 '''return a (possibly filtered) list of heads for the given branch
1727 1727
1728 1728 Heads are returned in topological order, from newest to oldest.
1729 1729 If branch is None, use the dirstate branch.
1730 1730 If start is not None, return only heads reachable from start.
1731 1731 If closed is True, return heads that are marked as closed as well.
1732 1732 '''
1733 1733 if branch is None:
1734 1734 branch = self[None].branch()
1735 1735 branches = self.branchmap()
1736 1736 if branch not in branches:
1737 1737 return []
1738 1738 # the cache returns heads ordered lowest to highest
1739 1739 bheads = list(reversed(branches[branch]))
1740 1740 if start is not None:
1741 1741 # filter out the heads that cannot be reached from startrev
1742 1742 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1743 1743 bheads = [h for h in bheads if h in fbheads]
1744 1744 if not closed:
1745 1745 bheads = [h for h in bheads if not self[h].closesbranch()]
1746 1746 return bheads
1747 1747
1748 1748 def branches(self, nodes):
1749 1749 if not nodes:
1750 1750 nodes = [self.changelog.tip()]
1751 1751 b = []
1752 1752 for n in nodes:
1753 1753 t = n
1754 1754 while True:
1755 1755 p = self.changelog.parents(n)
1756 1756 if p[1] != nullid or p[0] == nullid:
1757 1757 b.append((t, n, p[0], p[1]))
1758 1758 break
1759 1759 n = p[0]
1760 1760 return b
1761 1761
1762 1762 def between(self, pairs):
1763 1763 r = []
1764 1764
1765 1765 for top, bottom in pairs:
1766 1766 n, l, i = top, [], 0
1767 1767 f = 1
1768 1768
1769 1769 while n != bottom and n != nullid:
1770 1770 p = self.changelog.parents(n)[0]
1771 1771 if i == f:
1772 1772 l.append(n)
1773 1773 f = f * 2
1774 1774 n = p
1775 1775 i += 1
1776 1776
1777 1777 r.append(l)
1778 1778
1779 1779 return r
1780 1780
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (optionally limited to ancestors of ``heads``)
        from ``remote`` into this repository, then synchronize phase and
        obsolescence data.  Returns the result of addchangegroup(), or 0
        when no changes were found.
        """
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest changegroup protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                # nodes added by this pull, by changelog revision range
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    # a transaction may not exist yet if nothing was fetched
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1861 1861
1862 1862 def checkpush(self, force, revs):
1863 1863 """Extensions can override this function if additional checks have
1864 1864 to be performed before pushing, or call it if they override push
1865 1865 command.
1866 1866 """
1867 1867 pass
1868 1868
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # this message are here for 80 char limit reason
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are to push if there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missinghead will be obsolete or
                            # unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeed, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All out push fails. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # bookmark sync happens outside the locks: it only talks pushkey
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    # only advance the remote bookmark along a valid path
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
2054 2054
2055 2055 def changegroupinfo(self, nodes, source):
2056 2056 if self.ui.verbose or source == 'bundle':
2057 2057 self.ui.status(_("%d changesets found\n") % len(nodes))
2058 2058 if self.ui.debugflag:
2059 2059 self.ui.debug("list of changesets:\n")
2060 2060 for node in nodes:
2061 2061 self.ui.debug("%s\n" % hex(node))
2062 2062
2063 2063 def changegroupsubset(self, bases, heads, source):
2064 2064 """Compute a changegroup consisting of all the nodes that are
2065 2065 descendants of any of the bases and ancestors of any of the heads.
2066 2066 Return a chunkbuffer object whose read() method will return
2067 2067 successive changegroup chunks.
2068 2068
2069 2069 It is fairly complex as determining which filenodes and which
2070 2070 manifest nodes need to be included for the changeset to be complete
2071 2071 is non-trivial.
2072 2072
2073 2073 Another wrinkle is doing the reverse, figuring out which changeset in
2074 2074 the changegroup a particular filenode or manifestnode belongs to.
2075 2075 """
2076 2076 cl = self.changelog
2077 2077 if not bases:
2078 2078 bases = [nullid]
2079 2079 csets, bases, heads = cl.nodesbetween(bases, heads)
2080 2080 # We assume that all ancestors of bases are known
2081 2081 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2082 2082 return self._changegroupsubset(common, csets, heads, source)
2083 2083
2084 2084 def getlocalbundle(self, source, outgoing):
2085 2085 """Like getbundle, but taking a discovery.outgoing as an argument.
2086 2086
2087 2087 This is only implemented for local repos and reuses potentially
2088 2088 precomputed sets in outgoing."""
2089 2089 if not outgoing.missing:
2090 2090 return None
2091 2091 return self._changegroupsubset(outgoing.common,
2092 2092 outgoing.missing,
2093 2093 outgoing.missingheads,
2094 2094 source)
2095 2095
2096 2096 def getbundle(self, source, heads=None, common=None):
2097 2097 """Like changegroupsubset, but returns the set difference between the
2098 2098 ancestors of heads and the ancestors common.
2099 2099
2100 2100 If heads is None, use the local heads. If common is None, use [nullid].
2101 2101
2102 2102 The nodes in common might not all be known locally due to the way the
2103 2103 current discovery protocol works.
2104 2104 """
2105 2105 cl = self.changelog
2106 2106 if common:
2107 2107 nm = cl.nodemap
2108 2108 common = [n for n in common if n in nm]
2109 2109 else:
2110 2110 common = [nullid]
2111 2111 if not heads:
2112 2112 heads = cl.heads()
2113 2113 return self.getlocalbundle(source,
2114 2114 discovery.outgoing(cl, common, heads))
2115 2115
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup stream for ``csets`` (with heads ``heads``),
        assuming the receiver already knows the revisions in ``commonrevs``.
        Returns an unbundle10 wrapping a lazily generated chunk stream.
        """
        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cells shared with the lookup/gengroup closures below
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # called back by the bundler for every node; besides mapping a node
        # to its owning changeset it records which manifests/filenodes will
        # be needed by the later phases
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        # generator producing changesets, then manifests, then per-file
        # groups; relies on lookup() having populated mfs/fnodes first
        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2223 2223
2224 2224 def changegroup(self, basenodes, source):
2225 2225 # to avoid a race we use changegroupsubset() (issue1320)
2226 2226 return self.changegroupsubset(basenodes, self.heads(), source)
2227 2227
2228 2228 @unfilteredmethod
2229 2229 def _changegroup(self, nodes, source):
2230 2230 """Compute the changegroup of all nodes that we have that a recipient
2231 2231 doesn't. Return a chunkbuffer object whose read() method will return
2232 2232 successive changegroup chunks.
2233 2233
2234 2234 This is much easier than the previous function as we can assume that
2235 2235 the recipient has any changenode we aren't sending them.
2236 2236
2237 2237 nodes is the set of nodes to send"""
2238 2238
2239 2239 cl = self.changelog
2240 2240 mf = self.manifest
2241 2241 mfs = {}
2242 2242 changedfiles = set()
2243 2243 fstate = ['']
2244 2244 count = [0, 0]
2245 2245
2246 2246 self.hook('preoutgoing', throw=True, source=source)
2247 2247 self.changegroupinfo(nodes, source)
2248 2248
2249 2249 revset = set([cl.rev(n) for n in nodes])
2250 2250
2251 2251 def gennodelst(log):
2252 2252 ln, llr = log.node, log.linkrev
2253 2253 return [ln(r) for r in log if llr(r) in revset]
2254 2254
2255 2255 progress = self.ui.progress
2256 2256 _bundling = _('bundling')
2257 2257 _changesets = _('changesets')
2258 2258 _manifests = _('manifests')
2259 2259 _files = _('files')
2260 2260
2261 2261 def lookup(revlog, x):
2262 2262 if revlog == cl:
2263 2263 c = cl.read(x)
2264 2264 changedfiles.update(c[3])
2265 2265 mfs.setdefault(c[0], x)
2266 2266 count[0] += 1
2267 2267 progress(_bundling, count[0],
2268 2268 unit=_changesets, total=count[1])
2269 2269 return x
2270 2270 elif revlog == mf:
2271 2271 count[0] += 1
2272 2272 progress(_bundling, count[0],
2273 2273 unit=_manifests, total=count[1])
2274 2274 return cl.node(revlog.linkrev(revlog.rev(x)))
2275 2275 else:
2276 2276 progress(_bundling, count[0], item=fstate[0],
2277 2277 total=count[1], unit=_files)
2278 2278 return cl.node(revlog.linkrev(revlog.rev(x)))
2279 2279
2280 2280 bundler = changegroup.bundle10(lookup)
2281 2281 reorder = self.ui.config('bundle', 'reorder', 'auto')
2282 2282 if reorder == 'auto':
2283 2283 reorder = None
2284 2284 else:
2285 2285 reorder = util.parsebool(reorder)
2286 2286
2287 2287 def gengroup():
2288 2288 '''yield a sequence of changegroup chunks (strings)'''
2289 2289 # construct a list of all changed files
2290 2290
2291 2291 count[:] = [0, len(nodes)]
2292 2292 for chunk in cl.group(nodes, bundler, reorder=reorder):
2293 2293 yield chunk
2294 2294 progress(_bundling, None)
2295 2295
2296 2296 count[:] = [0, len(mfs)]
2297 2297 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2298 2298 yield chunk
2299 2299 progress(_bundling, None)
2300 2300
2301 2301 count[:] = [0, len(changedfiles)]
2302 2302 for fname in sorted(changedfiles):
2303 2303 filerevlog = self.file(fname)
2304 2304 if not len(filerevlog):
2305 2305 raise util.Abort(_("empty or missing revlog for %s")
2306 2306 % fname)
2307 2307 fstate[0] = fname
2308 2308 nodelist = gennodelst(filerevlog)
2309 2309 if nodelist:
2310 2310 count[0] += 1
2311 2311 yield bundler.fileheader(fname)
2312 2312 for chunk in filerevlog.group(nodelist, bundler, reorder):
2313 2313 yield chunk
2314 2314 yield bundler.close()
2315 2315 progress(_bundling, None)
2316 2316
2317 2317 if nodes:
2318 2318 self.hook('outgoing', node=hex(nodes[0]), source=source)
2319 2319
2320 2320 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2321 2321
2322 2322 @unfilteredmethod
2323 2323 def addchangegroup(self, source, srctype, url, emptyok=False):
2324 2324 """Add the changegroup returned by source.read() to this repo.
2325 2325 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2326 2326 the URL of the repo where this changegroup is coming from.
2327 2327
2328 2328 Return an integer summarizing the change to this repo:
2329 2329 - nothing changed or no source: 0
2330 2330 - more heads than before: 1+added heads (2..n)
2331 2331 - fewer heads than before: -1-removed heads (-2..-n)
2332 2332 - number of heads stays the same: 1
2333 2333 """
2334 2334 def csmap(x):
2335 2335 self.ui.debug("add changeset %s\n" % short(x))
2336 2336 return len(cl)
2337 2337
2338 2338 def revmap(x):
2339 2339 return cl.rev(x)
2340 2340
2341 2341 if not source:
2342 2342 return 0
2343 2343
2344 2344 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2345 2345
2346 2346 changesets = files = revisions = 0
2347 2347 efiles = set()
2348 2348
2349 2349 # write changelog data to temp files so concurrent readers will not see
2350 2350 # inconsistent view
2351 2351 cl = self.changelog
2352 2352 cl.delayupdate()
2353 2353 oldheads = cl.heads()
2354 2354
2355 2355 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2356 2356 try:
2357 2357 trp = weakref.proxy(tr)
2358 2358 # pull off the changeset group
2359 2359 self.ui.status(_("adding changesets\n"))
2360 2360 clstart = len(cl)
2361 2361 class prog(object):
2362 2362 step = _('changesets')
2363 2363 count = 1
2364 2364 ui = self.ui
2365 2365 total = None
2366 2366 def __call__(self):
2367 2367 self.ui.progress(self.step, self.count, unit=_('chunks'),
2368 2368 total=self.total)
2369 2369 self.count += 1
2370 2370 pr = prog()
2371 2371 source.callback = pr
2372 2372
2373 2373 source.changelogheader()
2374 2374 srccontent = cl.addgroup(source, csmap, trp)
2375 2375 if not (srccontent or emptyok):
2376 2376 raise util.Abort(_("received changelog group is empty"))
2377 2377 clend = len(cl)
2378 2378 changesets = clend - clstart
2379 2379 for c in xrange(clstart, clend):
2380 2380 efiles.update(self[c].files())
2381 2381 efiles = len(efiles)
2382 2382 self.ui.progress(_('changesets'), None)
2383 2383
2384 2384 # pull off the manifest group
2385 2385 self.ui.status(_("adding manifests\n"))
2386 2386 pr.step = _('manifests')
2387 2387 pr.count = 1
2388 2388 pr.total = changesets # manifests <= changesets
2389 2389 # no need to check for empty manifest group here:
2390 2390 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2391 2391 # no new manifest will be created and the manifest group will
2392 2392 # be empty during the pull
2393 2393 source.manifestheader()
2394 2394 self.manifest.addgroup(source, revmap, trp)
2395 2395 self.ui.progress(_('manifests'), None)
2396 2396
2397 2397 needfiles = {}
2398 2398 if self.ui.configbool('server', 'validate', default=False):
2399 2399 # validate incoming csets have their manifests
2400 2400 for cset in xrange(clstart, clend):
2401 2401 mfest = self.changelog.read(self.changelog.node(cset))[0]
2402 2402 mfest = self.manifest.readdelta(mfest)
2403 2403 # store file nodes we must see
2404 2404 for f, n in mfest.iteritems():
2405 2405 needfiles.setdefault(f, set()).add(n)
2406 2406
2407 2407 # process the files
2408 2408 self.ui.status(_("adding file changes\n"))
2409 2409 pr.step = _('files')
2410 2410 pr.count = 1
2411 2411 pr.total = efiles
2412 2412 source.callback = None
2413 2413
2414 2414 while True:
2415 2415 chunkdata = source.filelogheader()
2416 2416 if not chunkdata:
2417 2417 break
2418 2418 f = chunkdata["filename"]
2419 2419 self.ui.debug("adding %s revisions\n" % f)
2420 2420 pr()
2421 2421 fl = self.file(f)
2422 2422 o = len(fl)
2423 2423 if not fl.addgroup(source, revmap, trp):
2424 2424 raise util.Abort(_("received file revlog group is empty"))
2425 2425 revisions += len(fl) - o
2426 2426 files += 1
2427 2427 if f in needfiles:
2428 2428 needs = needfiles[f]
2429 2429 for new in xrange(o, len(fl)):
2430 2430 n = fl.node(new)
2431 2431 if n in needs:
2432 2432 needs.remove(n)
2433 2433 if not needs:
2434 2434 del needfiles[f]
2435 2435 self.ui.progress(_('files'), None)
2436 2436
2437 2437 for f, needs in needfiles.iteritems():
2438 2438 fl = self.file(f)
2439 2439 for n in needs:
2440 2440 try:
2441 2441 fl.rev(n)
2442 2442 except error.LookupError:
2443 2443 raise util.Abort(
2444 2444 _('missing file data for %s:%s - run hg verify') %
2445 2445 (f, hex(n)))
2446 2446
2447 2447 dh = 0
2448 2448 if oldheads:
2449 2449 heads = cl.heads()
2450 2450 dh = len(heads) - len(oldheads)
2451 2451 for h in heads:
2452 2452 if h not in oldheads and self[h].closesbranch():
2453 2453 dh -= 1
2454 2454 htext = ""
2455 2455 if dh:
2456 2456 htext = _(" (%+d heads)") % dh
2457 2457
2458 2458 self.ui.status(_("added %d changesets"
2459 2459 " with %d changes to %d files%s\n")
2460 2460 % (changesets, revisions, files, htext))
2461 2461 obsolete.clearobscaches(self)
2462 2462
2463 2463 if changesets > 0:
2464 2464 p = lambda: cl.writepending() and self.root or ""
2465 2465 self.hook('pretxnchangegroup', throw=True,
2466 2466 node=hex(cl.node(clstart)), source=srctype,
2467 2467 url=url, pending=p)
2468 2468
2469 2469 added = [cl.node(r) for r in xrange(clstart, clend)]
2470 2470 publishing = self.ui.configbool('phases', 'publish', True)
2471 2471 if srctype == 'push':
2472 2472 # Old server can not push the boundary themself.
2473 2473 # New server won't push the boundary if changeset already
2474 2474 # existed locally as secrete
2475 2475 #
2476 2476 # We should not use added here but the list of all change in
2477 2477 # the bundle
2478 2478 if publishing:
2479 2479 phases.advanceboundary(self, phases.public, srccontent)
2480 2480 else:
2481 2481 phases.advanceboundary(self, phases.draft, srccontent)
2482 2482 phases.retractboundary(self, phases.draft, added)
2483 2483 elif srctype != 'strip':
2484 2484 # publishing only alter behavior during push
2485 2485 #
2486 2486 # strip should not touch boundary at all
2487 2487 phases.retractboundary(self, phases.draft, added)
2488 2488
2489 2489 # make changelog see real files again
2490 2490 cl.finalize(trp)
2491 2491
2492 2492 tr.close()
2493 2493
2494 2494 if changesets > 0:
2495 2495 self.updatebranchcache()
2496 2496 def runhooks():
2497 2497 # forcefully update the on-disk branch cache
2498 2498 self.ui.debug("updating the branch cache\n")
2499 2499 self.hook("changegroup", node=hex(cl.node(clstart)),
2500 2500 source=srctype, url=url)
2501 2501
2502 2502 for n in added:
2503 2503 self.hook("incoming", node=hex(n), source=srctype,
2504 2504 url=url)
2505 2505 self._afterlock(runhooks)
2506 2506
2507 2507 finally:
2508 2508 tr.release()
2509 2509 # never return 0 here:
2510 2510 if dh < 0:
2511 2511 return dh - 1
2512 2512 else:
2513 2513 return dh + 1
2514 2514
2515 2515 def stream_in(self, remote, requirements):
2516 2516 lock = self.lock()
2517 2517 try:
2518 2518 # Save remote branchmap. We will use it later
2519 2519 # to speed up branchcache creation
2520 2520 rbranchmap = None
2521 2521 if remote.capable("branchmap"):
2522 2522 rbranchmap = remote.branchmap()
2523 2523
2524 2524 fp = remote.stream_out()
2525 2525 l = fp.readline()
2526 2526 try:
2527 2527 resp = int(l)
2528 2528 except ValueError:
2529 2529 raise error.ResponseError(
2530 2530 _('unexpected response from remote server:'), l)
2531 2531 if resp == 1:
2532 2532 raise util.Abort(_('operation forbidden by server'))
2533 2533 elif resp == 2:
2534 2534 raise util.Abort(_('locking the remote repository failed'))
2535 2535 elif resp != 0:
2536 2536 raise util.Abort(_('the server sent an unknown error code'))
2537 2537 self.ui.status(_('streaming all changes\n'))
2538 2538 l = fp.readline()
2539 2539 try:
2540 2540 total_files, total_bytes = map(int, l.split(' ', 1))
2541 2541 except (ValueError, TypeError):
2542 2542 raise error.ResponseError(
2543 2543 _('unexpected response from remote server:'), l)
2544 2544 self.ui.status(_('%d files to transfer, %s of data\n') %
2545 2545 (total_files, util.bytecount(total_bytes)))
2546 2546 handled_bytes = 0
2547 2547 self.ui.progress(_('clone'), 0, total=total_bytes)
2548 2548 start = time.time()
2549 2549 for i in xrange(total_files):
2550 2550 # XXX doesn't support '\n' or '\r' in filenames
2551 2551 l = fp.readline()
2552 2552 try:
2553 2553 name, size = l.split('\0', 1)
2554 2554 size = int(size)
2555 2555 except (ValueError, TypeError):
2556 2556 raise error.ResponseError(
2557 2557 _('unexpected response from remote server:'), l)
2558 2558 if self.ui.debugflag:
2559 2559 self.ui.debug('adding %s (%s)\n' %
2560 2560 (name, util.bytecount(size)))
2561 2561 # for backwards compat, name was partially encoded
2562 2562 ofp = self.sopener(store.decodedir(name), 'w')
2563 2563 for chunk in util.filechunkiter(fp, limit=size):
2564 2564 handled_bytes += len(chunk)
2565 2565 self.ui.progress(_('clone'), handled_bytes,
2566 2566 total=total_bytes)
2567 2567 ofp.write(chunk)
2568 2568 ofp.close()
2569 2569 elapsed = time.time() - start
2570 2570 if elapsed <= 0:
2571 2571 elapsed = 0.001
2572 2572 self.ui.progress(_('clone'), None)
2573 2573 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2574 2574 (util.bytecount(total_bytes), elapsed,
2575 2575 util.bytecount(total_bytes / elapsed)))
2576 2576
2577 2577 # new requirements = old non-format requirements +
2578 2578 # new format-related
2579 2579 # requirements from the streamed-in repository
2580 2580 requirements.update(set(self.requirements) - self.supportedformats)
2581 2581 self._applyrequirements(requirements)
2582 2582 self._writerequirements()
2583 2583
2584 2584 if rbranchmap:
2585 2585 rbheads = []
2586 2586 for bheads in rbranchmap.itervalues():
2587 2587 rbheads.extend(bheads)
2588 2588
2589 2589 self.branchcache = rbranchmap
2590 2590 if rbheads:
2591 2591 rtiprev = max((int(self.changelog.rev(node))
2592 2592 for node in rbheads))
2593 2593 self._writebranchcache(self.branchcache,
2594 2594 self[rtiprev].node(), rtiprev)
2595 2595 self.invalidate()
2596 2596 return len(self.heads()) + 1
2597 2597 finally:
2598 2598 lock.release()
2599 2599
2600 2600 def clone(self, remote, heads=[], stream=False):
2601 2601 '''clone remote repository.
2602 2602
2603 2603 keyword arguments:
2604 2604 heads: list of revs to clone (forces use of pull)
2605 2605 stream: use streaming clone if possible'''
2606 2606
2607 2607 # now, all clients that can request uncompressed clones can
2608 2608 # read repo formats supported by all servers that can serve
2609 2609 # them.
2610 2610
2611 2611 # if revlog format changes, client will have to check version
2612 2612 # and format flags on "stream" capability, and use
2613 2613 # uncompressed only if compatible.
2614 2614
2615 2615 if not stream:
2616 2616 # if the server explicitly prefers to stream (for fast LANs)
2617 2617 stream = remote.capable('stream-preferred')
2618 2618
2619 2619 if stream and not heads:
2620 2620 # 'stream' means remote revlog format is revlogv1 only
2621 2621 if remote.capable('stream'):
2622 2622 return self.stream_in(remote, set(('revlogv1',)))
2623 2623 # otherwise, 'streamreqs' contains the remote revlog format
2624 2624 streamreqs = remote.capable('streamreqs')
2625 2625 if streamreqs:
2626 2626 streamreqs = set(streamreqs.split(','))
2627 2627 # if we support it, stream in and adjust our requirements
2628 2628 if not streamreqs - self.supportedformats:
2629 2629 return self.stream_in(remote, streamreqs)
2630 2630 return self.pull(remote, heads)
2631 2631
2632 2632 def pushkey(self, namespace, key, old, new):
2633 2633 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2634 2634 old=old, new=new)
2635 2635 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2636 2636 ret = pushkey.push(self, namespace, key, old, new)
2637 2637 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2638 2638 ret=ret)
2639 2639 return ret
2640 2640
2641 2641 def listkeys(self, namespace):
2642 2642 self.hook('prelistkeys', throw=True, namespace=namespace)
2643 2643 self.ui.debug('listing keys for "%s"\n' % namespace)
2644 2644 values = pushkey.list(self, namespace)
2645 2645 self.hook('listkeys', namespace=namespace, values=values)
2646 2646 return values
2647 2647
2648 2648 def debugwireargs(self, one, two, three=None, four=None, five=None):
2649 2649 '''used to test argument passing over the wire'''
2650 2650 return "%s %s %s %s %s" % (one, two, three, four, five)
2651 2651
2652 2652 def savecommitmessage(self, text):
2653 2653 fp = self.opener('last-message.txt', 'wb')
2654 2654 try:
2655 2655 fp.write(text)
2656 2656 finally:
2657 2657 fp.close()
2658 return self.pathto(fp.name[len(self.root)+1:])
2658 return self.pathto(fp.name[len(self.root) + 1:])
2659 2659
2660 2660 # used to avoid circular references so destructors work
2661 2661 def aftertrans(files):
2662 2662 renamefiles = [tuple(t) for t in files]
2663 2663 def a():
2664 2664 for src, dest in renamefiles:
2665 2665 try:
2666 2666 util.rename(src, dest)
2667 2667 except OSError: # journal file does not yet exist
2668 2668 pass
2669 2669 return a
2670 2670
2671 2671 def undoname(fn):
2672 2672 base, name = os.path.split(fn)
2673 2673 assert name.startswith('journal')
2674 2674 return os.path.join(base, name.replace('journal', 'undo', 1))
2675 2675
2676 2676 def instance(ui, path, create):
2677 2677 return localrepository(ui, util.urllocalpath(path), create)
2678 2678
2679 2679 def islocal(path):
2680 2680 return True
@@ -1,1890 +1,1890 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re, posixpath
10 10 import tempfile, zlib, shutil
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 15 import context
16 16
17 17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 18
19 19 class PatchError(Exception):
20 20 pass
21 21
22 22
23 23 # public functions
24 24
25 25 def split(stream):
26 26 '''return an iterator of individual patches from a stream'''
27 27 def isheader(line, inheader):
28 28 if inheader and line[0] in (' ', '\t'):
29 29 # continuation
30 30 return True
31 31 if line[0] in (' ', '-', '+'):
32 32 # diff line - don't check for header pattern in there
33 33 return False
34 34 l = line.split(': ', 1)
35 35 return len(l) == 2 and ' ' not in l[0]
36 36
37 37 def chunk(lines):
38 38 return cStringIO.StringIO(''.join(lines))
39 39
40 40 def hgsplit(stream, cur):
41 41 inheader = True
42 42
43 43 for line in stream:
44 44 if not line.strip():
45 45 inheader = False
46 46 if not inheader and line.startswith('# HG changeset patch'):
47 47 yield chunk(cur)
48 48 cur = []
49 49 inheader = True
50 50
51 51 cur.append(line)
52 52
53 53 if cur:
54 54 yield chunk(cur)
55 55
56 56 def mboxsplit(stream, cur):
57 57 for line in stream:
58 58 if line.startswith('From '):
59 59 for c in split(chunk(cur[1:])):
60 60 yield c
61 61 cur = []
62 62
63 63 cur.append(line)
64 64
65 65 if cur:
66 66 for c in split(chunk(cur[1:])):
67 67 yield c
68 68
69 69 def mimesplit(stream, cur):
70 70 def msgfp(m):
71 71 fp = cStringIO.StringIO()
72 72 g = email.Generator.Generator(fp, mangle_from_=False)
73 73 g.flatten(m)
74 74 fp.seek(0)
75 75 return fp
76 76
77 77 for line in stream:
78 78 cur.append(line)
79 79 c = chunk(cur)
80 80
81 81 m = email.Parser.Parser().parse(c)
82 82 if not m.is_multipart():
83 83 yield msgfp(m)
84 84 else:
85 85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 86 for part in m.walk():
87 87 ct = part.get_content_type()
88 88 if ct not in ok_types:
89 89 continue
90 90 yield msgfp(part)
91 91
92 92 def headersplit(stream, cur):
93 93 inheader = False
94 94
95 95 for line in stream:
96 96 if not inheader and isheader(line, inheader):
97 97 yield chunk(cur)
98 98 cur = []
99 99 inheader = True
100 100 if inheader and not isheader(line, inheader):
101 101 inheader = False
102 102
103 103 cur.append(line)
104 104
105 105 if cur:
106 106 yield chunk(cur)
107 107
108 108 def remainder(cur):
109 109 yield chunk(cur)
110 110
111 111 class fiter(object):
112 112 def __init__(self, fp):
113 113 self.fp = fp
114 114
115 115 def __iter__(self):
116 116 return self
117 117
118 118 def next(self):
119 119 l = self.fp.readline()
120 120 if not l:
121 121 raise StopIteration
122 122 return l
123 123
124 124 inheader = False
125 125 cur = []
126 126
127 127 mimeheaders = ['content-type']
128 128
129 129 if not util.safehasattr(stream, 'next'):
130 130 # http responses, for example, have readline but not next
131 131 stream = fiter(stream)
132 132
133 133 for line in stream:
134 134 cur.append(line)
135 135 if line.startswith('# HG changeset patch'):
136 136 return hgsplit(stream, cur)
137 137 elif line.startswith('From '):
138 138 return mboxsplit(stream, cur)
139 139 elif isheader(line, inheader):
140 140 inheader = True
141 141 if line.split(':', 1)[0].lower() in mimeheaders:
142 142 # let email parser handle this
143 143 return mimesplit(stream, cur)
144 144 elif line.startswith('--- ') and inheader:
145 145 # No evil headers seen by diff start, split by hand
146 146 return headersplit(stream, cur)
147 147 # Not enough info, keep reading
148 148
149 149 # if we are here, we have a very plain patch
150 150 return remainder(cur)
151 151
152 152 def extract(ui, fileobj):
153 153 '''extract patch from data read from fileobj.
154 154
155 155 patch can be a normal patch or contained in an email message.
156 156
157 157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 158 Any item in the returned tuple can be None. If filename is None,
159 159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160 160
161 161 # attempt to detect the start of a patch
162 162 # (this heuristic is borrowed from quilt)
163 163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 165 r'---[ \t].*?^\+\+\+[ \t]|'
166 166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167 167
168 168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 169 tmpfp = os.fdopen(fd, 'w')
170 170 try:
171 171 msg = email.Parser.Parser().parse(fileobj)
172 172
173 173 subject = msg['Subject']
174 174 user = msg['From']
175 175 if not subject and not user:
176 176 # Not an email, restore parsed headers if any
177 177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178 178
179 179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 180 # should try to parse msg['Date']
181 181 date = None
182 182 nodeid = None
183 183 branch = None
184 184 parents = []
185 185
186 186 if subject:
187 187 if subject.startswith('[PATCH'):
188 188 pend = subject.find(']')
189 189 if pend >= 0:
190 190 subject = subject[pend + 1:].lstrip()
191 191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 192 ui.debug('Subject: %s\n' % subject)
193 193 if user:
194 194 ui.debug('From: %s\n' % user)
195 195 diffs_seen = 0
196 196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 197 message = ''
198 198 for part in msg.walk():
199 199 content_type = part.get_content_type()
200 200 ui.debug('Content-Type: %s\n' % content_type)
201 201 if content_type not in ok_types:
202 202 continue
203 203 payload = part.get_payload(decode=True)
204 204 m = diffre.search(payload)
205 205 if m:
206 206 hgpatch = False
207 207 hgpatchheader = False
208 208 ignoretext = False
209 209
210 210 ui.debug('found patch at byte %d\n' % m.start(0))
211 211 diffs_seen += 1
212 212 cfp = cStringIO.StringIO()
213 213 for line in payload[:m.start(0)].splitlines():
214 214 if line.startswith('# HG changeset patch') and not hgpatch:
215 215 ui.debug('patch generated by hg export\n')
216 216 hgpatch = True
217 217 hgpatchheader = True
218 218 # drop earlier commit message content
219 219 cfp.seek(0)
220 220 cfp.truncate()
221 221 subject = None
222 222 elif hgpatchheader:
223 223 if line.startswith('# User '):
224 224 user = line[7:]
225 225 ui.debug('From: %s\n' % user)
226 226 elif line.startswith("# Date "):
227 227 date = line[7:]
228 228 elif line.startswith("# Branch "):
229 229 branch = line[9:]
230 230 elif line.startswith("# Node ID "):
231 231 nodeid = line[10:]
232 232 elif line.startswith("# Parent "):
233 233 parents.append(line[9:].lstrip())
234 234 elif not line.startswith("# "):
235 235 hgpatchheader = False
236 236 elif line == '---' and gitsendmail:
237 237 ignoretext = True
238 238 if not hgpatchheader and not ignoretext:
239 239 cfp.write(line)
240 240 cfp.write('\n')
241 241 message = cfp.getvalue()
242 242 if tmpfp:
243 243 tmpfp.write(payload)
244 244 if not payload.endswith('\n'):
245 245 tmpfp.write('\n')
246 246 elif not diffs_seen and message and content_type == 'text/plain':
247 247 message += '\n' + payload
248 248 except: # re-raises
249 249 tmpfp.close()
250 250 os.unlink(tmpname)
251 251 raise
252 252
253 253 if subject and not message.startswith(subject):
254 254 message = '%s\n%s' % (subject, message)
255 255 tmpfp.close()
256 256 if not diffs_seen:
257 257 os.unlink(tmpname)
258 258 return None, message, user, date, branch, None, None, None
259 259 p1 = parents and parents.pop(0) or None
260 260 p2 = parents and parents.pop(0) or None
261 261 return tmpname, message, user, date, branch, nodeid, p1, p2
262 262
263 263 class patchmeta(object):
264 264 """Patched file metadata
265 265
266 266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 270 'islink' is True if the file is a symlink and 'isexec' is True if
271 271 the file is executable. Otherwise, 'mode' is None.
272 272 """
273 273 def __init__(self, path):
274 274 self.path = path
275 275 self.oldpath = None
276 276 self.mode = None
277 277 self.op = 'MODIFY'
278 278 self.binary = False
279 279
280 280 def setmode(self, mode):
281 281 islink = mode & 020000
282 282 isexec = mode & 0100
283 283 self.mode = (islink, isexec)
284 284
285 285 def copy(self):
286 286 other = patchmeta(self.path)
287 287 other.oldpath = self.oldpath
288 288 other.mode = self.mode
289 289 other.op = self.op
290 290 other.binary = self.binary
291 291 return other
292 292
293 293 def _ispatchinga(self, afile):
294 294 if afile == '/dev/null':
295 295 return self.op == 'ADD'
296 296 return afile == 'a/' + (self.oldpath or self.path)
297 297
298 298 def _ispatchingb(self, bfile):
299 299 if bfile == '/dev/null':
300 300 return self.op == 'DELETE'
301 301 return bfile == 'b/' + self.path
302 302
303 303 def ispatching(self, afile, bfile):
304 304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
305 305
306 306 def __repr__(self):
307 307 return "<patchmeta %s %r>" % (self.op, self.path)
308 308
309 309 def readgitpatch(lr):
310 310 """extract git-style metadata about patches from <patchname>"""
311 311
312 312 # Filter patch for git information
313 313 gp = None
314 314 gitpatches = []
315 315 for line in lr:
316 316 line = line.rstrip(' \r\n')
317 317 if line.startswith('diff --git'):
318 318 m = gitre.match(line)
319 319 if m:
320 320 if gp:
321 321 gitpatches.append(gp)
322 322 dst = m.group(2)
323 323 gp = patchmeta(dst)
324 324 elif gp:
325 325 if line.startswith('--- '):
326 326 gitpatches.append(gp)
327 327 gp = None
328 328 continue
329 329 if line.startswith('rename from '):
330 330 gp.op = 'RENAME'
331 331 gp.oldpath = line[12:]
332 332 elif line.startswith('rename to '):
333 333 gp.path = line[10:]
334 334 elif line.startswith('copy from '):
335 335 gp.op = 'COPY'
336 336 gp.oldpath = line[10:]
337 337 elif line.startswith('copy to '):
338 338 gp.path = line[8:]
339 339 elif line.startswith('deleted file'):
340 340 gp.op = 'DELETE'
341 341 elif line.startswith('new file mode '):
342 342 gp.op = 'ADD'
343 343 gp.setmode(int(line[-6:], 8))
344 344 elif line.startswith('new mode '):
345 345 gp.setmode(int(line[-6:], 8))
346 346 elif line.startswith('GIT binary patch'):
347 347 gp.binary = True
348 348 if gp:
349 349 gitpatches.append(gp)
350 350
351 351 return gitpatches
352 352
353 353 class linereader(object):
354 354 # simple class to allow pushing lines back into the input stream
355 355 def __init__(self, fp):
356 356 self.fp = fp
357 357 self.buf = []
358 358
359 359 def push(self, line):
360 360 if line is not None:
361 361 self.buf.append(line)
362 362
363 363 def readline(self):
364 364 if self.buf:
365 365 l = self.buf[0]
366 366 del self.buf[0]
367 367 return l
368 368 return self.fp.readline()
369 369
370 370 def __iter__(self):
371 371 while True:
372 372 l = self.readline()
373 373 if not l:
374 374 break
375 375 yield l
376 376
377 377 class abstractbackend(object):
378 378 def __init__(self, ui):
379 379 self.ui = ui
380 380
381 381 def getfile(self, fname):
382 382 """Return target file data and flags as a (data, (islink,
383 383 isexec)) tuple.
384 384 """
385 385 raise NotImplementedError
386 386
387 387 def setfile(self, fname, data, mode, copysource):
388 388 """Write data to target file fname and set its mode. mode is a
389 389 (islink, isexec) tuple. If data is None, the file content should
390 390 be left unchanged. If the file is modified after being copied,
391 391 copysource is set to the original file name.
392 392 """
393 393 raise NotImplementedError
394 394
395 395 def unlink(self, fname):
396 396 """Unlink target file."""
397 397 raise NotImplementedError
398 398
399 399 def writerej(self, fname, failed, total, lines):
400 400 """Write rejected lines for fname. total is the number of hunks
401 401 which failed to apply and total the total number of hunks for this
402 402 files.
403 403 """
404 404 pass
405 405
406 406 def exists(self, fname):
407 407 raise NotImplementedError
408 408
class fsbackend(abstractbackend):
    """Patch backend writing directly to the filesystem below basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of f inside the backend's base directory
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname.

        For symlinks the link target is returned as data.
        """
        path = self._join(fname)
        if os.path.islink(path):
            return (os.readlink(path), (True, False))
        isexec = False
        try:
            isexec = os.lstat(path).st_mode & 0100 != 0
        except OSError, e:
            # a missing file simply reads as non-executable
            if e.errno != errno.ENOENT:
                raise
        return (self.opener.read(fname), (False, isexec))

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only flags need updating
            util.setflags(self._join(fname), islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                util.setflags(self._join(fname), False, True)

    def unlink(self, fname):
        try:
            util.unlinkpath(self._join(fname))
        except OSError, inst:
            # removing an already-missing file is not an error
            if inst.errno != errno.ENOENT:
                raise

    def writerej(self, fname, failed, total, lines):
        """Save rejected hunks next to the target file as fname.rej."""
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return os.path.lexists(self._join(fname))
459 459
class workingbackend(fsbackend):
    """Filesystem backend that also keeps the repository dirstate in
    sync (copies, forgets, addremove) with the files it touches."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # forwarded to addremove() for rename similarity detection
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist on disk but are untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush dirstate updates and return the sorted touched files."""
        wctx = self.repo[None]
        addremoved = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # addremove().
                    addremoved.discard(f)
        if addremoved:
            cwd = self.repo.getcwd()
            if cwd:
                addremoved = [util.pathto(self.repo.root, cwd, f)
                              for f in addremoved]
            scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
        return sorted(self.changed)
507 507
class filestore(object):
    """Accumulate patched file contents, spilling to disk when large.

    Entries are kept in memory until their aggregate size exceeds
    maxsize (default 4 MiB, unlimited memory use when maxsize < 0);
    later entries are written into a private temporary directory.
    """
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        if maxsize is None:
            maxsize = 4 * (2 ** 20)
        self.maxsize = maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        """Record (data, mode, copied) for fname."""
        fits = self.maxsize < 0 or len(data) + self.size <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            # first overflow: create the on-disk spill area
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = scmutil.opener(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname; IOError if unknown."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener is None or fname not in self.files:
            raise IOError
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        """Delete the spill directory, if one was created."""
        if self.opener:
            shutil.rmtree(self.opener.base)
544 544
class repobackend(abstractbackend):
    """Patch backend applying changes against a changectx, buffering
    the results in a filestore instead of the working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # only files tracked in the context may be patched
        if fname in self.ctx:
            return
        raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname in the context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            raise IOError
        flags = fctx.flags()
        islink, isexec = 'l' in flags, 'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Buffer fname's new content, recording copy and change info."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # content unchanged: carry the current data over
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all touched files."""
        return self.changed.union(self.removed)
586 586
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the original non-raw patterns relied on Python passing
# through undefined escapes like '\d', which is fragile (and deprecated
# on modern interpreters); the compiled patterns are byte-identical.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range line: "--- start[,end] ---" or "*** start[,end] ***"
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported line-ending normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
591 591
class patchfile(object):
    """State for applying a series of hunks to one target file.

    Tracks the current content (self.lines), the cumulative line offset
    and skew introduced by earlier hunks, and the rejected hunks.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        # EOL style detected from the first line of existing content
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames take their content from the store
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            # target unreadable: only acceptable when the patch creates it
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, converting EOLs per eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # print the "patching file" banner at most once, promoting it to
        # a warning the first time a problem is reported
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h, trying offsets and increasing fuzz when the
        exact context does not match.

        Returns 0 on clean application, the fuzz level used on fuzzy
        application, and -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # NOTE(review): here the '%' is applied inside _(), unlike
                # the sibling messages below — looks unintended, confirm.
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n" % self.fname))
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no matching needed
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
            self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
806 806
class hunk(object):
    """One text hunk of a patch, in unified or context format.

    self.a holds the old-side lines (with '-'/' ' markers), self.b the
    new-side lines, self.hunk the raw hunk text starting with the range
    descriptor line.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified '@@ -a,b +c,d @@' hunk body from reader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # omitted lengths default to 1 per unified-diff convention
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk body from lr and rebuild it as a
        unified hunk (self.desc/self.hunk end up in unified form)."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side block, merging it into self.hunk in place
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki past matching/deleted lines, inserting the
            # new line at the right spot otherwise
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # absorb a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides match their declared lengths
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to `fuzz`
        context lines shaved off and starts converted to 0-based."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1023 1023
class binhunk(object):
    'A binary patch file. Only understands literals so far.'
    def __init__(self, lr, fname):
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # usable once the literal payload has been decoded
        return self.text is not None

    def new(self):
        return [self.text]

    def _read(self, lr):
        """Decode a git 'literal' binary hunk from reader lr."""
        def getline(lr, hunk):
            # read one raw line, keeping a copy for reject output
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # skip forward to the 'literal <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                break
        size = int(line[8:].rstrip())
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1071 1071
def parsefilename(str):
    """Extract the filename from a '--- ' or '+++ ' diff header line.

    The name runs from after the 4-char marker up to the first tab
    (or, failing that, the first space); the whole remainder is used
    when neither separator is present.
    """
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1081 1081
def pathstrip(path, strip):
    """Strip the leading `strip` path components off `path`.

    Returns a (stripped-prefix, remainder) pair; with strip == 0 the
    prefix is empty. Raises PatchError when path has fewer components
    than requested.
    """
    if strip == 0:
        return '', path.rstrip()
    total = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < total - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
1099 1099
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Synthesize a patchmeta record for a plain (non-git) patch hunk.

    Infer from the hunk ranges and the stripped a/b paths which file is
    really being patched and whether the operation is an ADD or DELETE.
    Raises PatchError when neither source nor destination can be
    determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a 0,0 range against /dev/null marks creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1148 1148
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # input is not seekable (e.g. a pipe): buffer it in memory first
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from the beginning
    fp.seek(pos)
    return gitpatches
1174 1174
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until the diff flavor (unified/context) is known
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunk-less metadata entries preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunk-less git metadata entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1273 1273
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # patchfile is the per-file hunk applier factory used by _applydiff
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      eolmode=eolmode)
1286 1286
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    """Feed iterhunks() events to patcher instances and backend/store.

    Returns 0 on success, 1 when any hunk needed fuzz, -1 when any hunk
    was rejected.
    """

    def pstrip(p):
        # strip - 1: git paths carry an extra leading a/ or b/ component
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # close the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no content hunk)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store (see scangitpatch)
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1368 1368
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.

    files is updated in place with the names reported as patched.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                       util.shellquote(patchname)))
    try:
        # scrape the external tool's output for files, fuzz and failures
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1415 1415
1416 1416 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1417 1417 if files is None:
1418 1418 files = set()
1419 1419 if eolmode is None:
1420 1420 eolmode = ui.config('patch', 'eol', 'strict')
1421 1421 if eolmode.lower() not in eolmodes:
1422 1422 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1423 1423 eolmode = eolmode.lower()
1424 1424
1425 1425 store = filestore()
1426 1426 try:
1427 1427 fp = open(patchobj, 'rb')
1428 1428 except TypeError:
1429 1429 fp = patchobj
1430 1430 try:
1431 1431 ret = applydiff(ui, fp, backend, store, strip=strip,
1432 1432 eolmode=eolmode)
1433 1433 finally:
1434 1434 if fp != patchobj:
1435 1435 fp.close()
1436 1436 files.update(backend.close())
1437 1437 store.close()
1438 1438 if ret < 0:
1439 1439 raise PatchError(_('patch failed to apply'))
1440 1440 return ret > 0
1441 1441
1442 1442 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1443 1443 similarity=0):
1444 1444 """use builtin patch to apply <patchobj> to the working directory.
1445 1445 returns whether patch was applied with fuzz factor."""
1446 1446 backend = workingbackend(ui, repo, similarity)
1447 1447 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1448 1448
1449 1449 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1450 1450 eolmode='strict'):
1451 1451 backend = repobackend(ui, repo, ctx, store)
1452 1452 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1453 1453
1454 1454 def makememctx(repo, parents, text, user, date, branch, files, store,
1455 1455 editor=None):
1456 1456 def getfilectx(repo, memctx, path):
1457 1457 data, (islink, isexec), copied = store.getfile(path)
1458 1458 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1459 1459 copied=copied)
1460 1460 extra = {}
1461 1461 if branch:
1462 1462 extra['branch'] = encoding.fromlocal(branch)
1463 1463 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1464 1464 date, extra)
1465 1465 if editor:
1466 1466 ctx._text = editor(repo, ctx, [])
1467 1467 return ctx
1468 1468
1469 1469 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1470 1470 similarity=0):
1471 1471 """Apply <patchname> to the working directory.
1472 1472
1473 1473 'eolmode' specifies how end of lines should be handled. It can be:
1474 1474 - 'strict': inputs are read in binary mode, EOLs are preserved
1475 1475 - 'crlf': EOLs are ignored when patching and reset to CRLF
1476 1476 - 'lf': EOLs are ignored when patching and reset to LF
1477 1477 - None: get it from user settings, default to 'strict'
1478 1478 'eolmode' is ignored when using an external patcher program.
1479 1479
1480 1480 Returns whether patch was applied with fuzz factor.
1481 1481 """
1482 1482 patcher = ui.config('ui', 'patch')
1483 1483 if files is None:
1484 1484 files = set()
1485 1485 try:
1486 1486 if patcher:
1487 1487 return _externalpatch(ui, repo, patcher, patchname, strip,
1488 1488 files, similarity)
1489 1489 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1490 1490 similarity)
1491 1491 except PatchError, err:
1492 1492 raise util.Abort(str(err))
1493 1493
1494 1494 def changedfiles(ui, repo, patchpath, strip=1):
1495 1495 backend = fsbackend(ui, repo.root)
1496 1496 fp = open(patchpath, 'rb')
1497 1497 try:
1498 1498 changed = set()
1499 1499 for state, values in iterhunks(fp):
1500 1500 if state == 'file':
1501 1501 afile, bfile, first_hunk, gp = values
1502 1502 if gp:
1503 1503 gp.path = pathstrip(gp.path, strip - 1)[1]
1504 1504 if gp.oldpath:
1505 1505 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1506 1506 else:
1507 1507 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1508 1508 changed.add(gp.path)
1509 1509 if gp.op == 'RENAME':
1510 1510 changed.add(gp.oldpath)
1511 1511 elif state not in ('hunk', 'git'):
1512 1512 raise util.Abort(_('unsupported parser state: %s') % state)
1513 1513 return changed
1514 1514 finally:
1515 1515 fp.close()
1516 1516
1517 1517 class GitDiffRequired(Exception):
1518 1518 pass
1519 1519
1520 1520 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1521 1521 def get(key, name=None, getter=ui.configbool):
1522 1522 return ((opts and opts.get(key)) or
1523 1523 getter(section, name or key, None, untrusted=untrusted))
1524 1524 return mdiff.diffopts(
1525 1525 text=opts and opts.get('text'),
1526 1526 git=get('git'),
1527 1527 nodates=get('nodates'),
1528 1528 showfunc=get('show_function', 'showfunc'),
1529 1529 ignorews=get('ignore_all_space', 'ignorews'),
1530 1530 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1531 1531 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1532 1532 context=get('unified', getter=ui.config))
1533 1533
1534 1534 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1535 1535 losedatafn=None, prefix=''):
1536 1536 '''yields diff of changes to files between two nodes, or node and
1537 1537 working directory.
1538 1538
1539 1539 if node1 is None, use first dirstate parent instead.
1540 1540 if node2 is None, compare node1 with working directory.
1541 1541
1542 1542 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1543 1543 every time some change cannot be represented with the current
1544 1544 patch format. Return False to upgrade to git patch format, True to
1545 1545 accept the loss or raise an exception to abort the diff. It is
1546 1546 called with the name of current file being diffed as 'fn'. If set
1547 1547 to None, patches will always be upgraded to git format when
1548 1548 necessary.
1549 1549
1550 1550 prefix is a filename prefix that is prepended to all filenames on
1551 1551 display (used for subrepos).
1552 1552 '''
1553 1553
1554 1554 if opts is None:
1555 1555 opts = mdiff.defaultopts
1556 1556
1557 1557 if not node1 and not node2:
1558 1558 node1 = repo.dirstate.p1()
1559 1559
1560 1560 def lrugetfilectx():
1561 1561 cache = {}
1562 1562 order = util.deque()
1563 1563 def getfilectx(f, ctx):
1564 1564 fctx = ctx.filectx(f, filelog=cache.get(f))
1565 1565 if f not in cache:
1566 1566 if len(cache) > 20:
1567 1567 del cache[order.popleft()]
1568 1568 cache[f] = fctx.filelog()
1569 1569 else:
1570 1570 order.remove(f)
1571 1571 order.append(f)
1572 1572 return fctx
1573 1573 return getfilectx
1574 1574 getfilectx = lrugetfilectx()
1575 1575
1576 1576 ctx1 = repo[node1]
1577 1577 ctx2 = repo[node2]
1578 1578
1579 1579 if not changes:
1580 1580 changes = repo.status(ctx1, ctx2, match=match)
1581 1581 modified, added, removed = changes[:3]
1582 1582
1583 1583 if not modified and not added and not removed:
1584 1584 return []
1585 1585
1586 1586 revs = None
1587 1587 hexfunc = repo.ui.debugflag and hex or short
1588 1588 revs = [hexfunc(node) for node in [node1, node2] if node]
1589 1589
1590 1590 copy = {}
1591 1591 if opts.git or opts.upgrade:
1592 1592 copy = copies.pathcopies(ctx1, ctx2)
1593 1593
1594 1594 def difffn(opts, losedata):
1595 1595 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1596 1596 copy, getfilectx, opts, losedata, prefix)
1597 1597 if opts.upgrade and not opts.git:
1598 1598 try:
1599 1599 def losedata(fn):
1600 1600 if not losedatafn or not losedatafn(fn=fn):
1601 1601 raise GitDiffRequired
1602 1602 # Buffer the whole output until we are sure it can be generated
1603 1603 return list(difffn(opts.copy(git=False), losedata))
1604 1604 except GitDiffRequired:
1605 1605 return difffn(opts.copy(git=True), None)
1606 1606 else:
1607 1607 return difffn(opts, None)
1608 1608
1609 1609 def difflabel(func, *args, **kw):
1610 1610 '''yields 2-tuples of (output, label) based on the output of func()'''
1611 1611 headprefixes = [('diff', 'diff.diffline'),
1612 1612 ('copy', 'diff.extended'),
1613 1613 ('rename', 'diff.extended'),
1614 1614 ('old', 'diff.extended'),
1615 1615 ('new', 'diff.extended'),
1616 1616 ('deleted', 'diff.extended'),
1617 1617 ('---', 'diff.file_a'),
1618 1618 ('+++', 'diff.file_b')]
1619 1619 textprefixes = [('@', 'diff.hunk'),
1620 1620 ('-', 'diff.deleted'),
1621 1621 ('+', 'diff.inserted')]
1622 1622 head = False
1623 1623 for chunk in func(*args, **kw):
1624 1624 lines = chunk.split('\n')
1625 1625 for i, line in enumerate(lines):
1626 1626 if i != 0:
1627 1627 yield ('\n', '')
1628 1628 if head:
1629 1629 if line.startswith('@'):
1630 1630 head = False
1631 1631 else:
1632 1632 if line and line[0] not in ' +-@\\':
1633 1633 head = True
1634 1634 stripline = line
1635 1635 if not head and line and line[0] in '+-':
1636 1636 # highlight trailing whitespace, but only in changed lines
1637 1637 stripline = line.rstrip()
1638 1638 prefixes = textprefixes
1639 1639 if head:
1640 1640 prefixes = headprefixes
1641 1641 for prefix, label in prefixes:
1642 1642 if stripline.startswith(prefix):
1643 1643 yield (stripline, label)
1644 1644 break
1645 1645 else:
1646 1646 yield (line, '')
1647 1647 if line != stripline:
1648 1648 yield (line[len(stripline):], 'diff.trailingwhitespace')
1649 1649
1650 1650 def diffui(*args, **kw):
1651 1651 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1652 1652 return difflabel(diff, *args, **kw)
1653 1653
1654 1654 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1655 1655 copy, getfilectx, opts, losedatafn, prefix):
1656 1656
1657 1657 def join(f):
1658 1658 return posixpath.join(prefix, f)
1659 1659
1660 1660 def addmodehdr(header, omode, nmode):
1661 1661 if omode != nmode:
1662 1662 header.append('old mode %s\n' % omode)
1663 1663 header.append('new mode %s\n' % nmode)
1664 1664
1665 1665 def addindexmeta(meta, revs):
1666 1666 if opts.git:
1667 1667 i = len(revs)
1668 1668 if i==2:
1669 1669 meta.append('index %s..%s\n' % tuple(revs))
1670 1670 elif i==3:
1671 1671 meta.append('index %s,%s..%s\n' % tuple(revs))
1672 1672
1673 1673 def gitindex(text):
1674 1674 if not text:
1675 1675 return hex(nullid)
1676 1676 l = len(text)
1677 1677 s = util.sha1('blob %d\0' % l)
1678 1678 s.update(text)
1679 1679 return s.hexdigest()
1680 1680
1681 1681 def diffline(a, b, revs):
1682 1682 if opts.git:
1683 1683 line = 'diff --git a/%s b/%s\n' % (a, b)
1684 1684 elif not repo.ui.quiet:
1685 1685 if revs:
1686 1686 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1687 1687 line = 'diff %s %s\n' % (revinfo, a)
1688 1688 else:
1689 1689 line = 'diff %s\n' % a
1690 1690 else:
1691 1691 line = ''
1692 1692 return line
1693 1693
1694 1694 date1 = util.datestr(ctx1.date())
1695 1695 man1 = ctx1.manifest()
1696 1696
1697 1697 gone = set()
1698 1698 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1699 1699
1700 1700 copyto = dict([(v, k) for k, v in copy.items()])
1701 1701
1702 1702 if opts.git:
1703 1703 revs = None
1704 1704
1705 1705 for f in sorted(modified + added + removed):
1706 1706 to = None
1707 1707 tn = None
1708 1708 dodiff = True
1709 1709 header = []
1710 1710 if f in man1:
1711 1711 to = getfilectx(f, ctx1).data()
1712 1712 if f not in removed:
1713 1713 tn = getfilectx(f, ctx2).data()
1714 1714 a, b = f, f
1715 1715 if opts.git or losedatafn:
1716 1716 if f in added:
1717 1717 mode = gitmode[ctx2.flags(f)]
1718 1718 if f in copy or f in copyto:
1719 1719 if opts.git:
1720 1720 if f in copy:
1721 1721 a = copy[f]
1722 1722 else:
1723 1723 a = copyto[f]
1724 1724 omode = gitmode[man1.flags(a)]
1725 1725 addmodehdr(header, omode, mode)
1726 1726 if a in removed and a not in gone:
1727 1727 op = 'rename'
1728 1728 gone.add(a)
1729 1729 else:
1730 1730 op = 'copy'
1731 1731 header.append('%s from %s\n' % (op, join(a)))
1732 1732 header.append('%s to %s\n' % (op, join(f)))
1733 1733 to = getfilectx(a, ctx1).data()
1734 1734 else:
1735 1735 losedatafn(f)
1736 1736 else:
1737 1737 if opts.git:
1738 1738 header.append('new file mode %s\n' % mode)
1739 1739 elif ctx2.flags(f):
1740 1740 losedatafn(f)
1741 1741 # In theory, if tn was copied or renamed we should check
1742 1742 # if the source is binary too but the copy record already
1743 1743 # forces git mode.
1744 1744 if util.binary(tn):
1745 1745 if opts.git:
1746 1746 dodiff = 'binary'
1747 1747 else:
1748 1748 losedatafn(f)
1749 1749 if not opts.git and not tn:
1750 1750 # regular diffs cannot represent new empty file
1751 1751 losedatafn(f)
1752 1752 elif f in removed:
1753 1753 if opts.git:
1754 1754 # have we already reported a copy above?
1755 1755 if ((f in copy and copy[f] in added
1756 1756 and copyto[copy[f]] == f) or
1757 1757 (f in copyto and copyto[f] in added
1758 1758 and copy[copyto[f]] == f)):
1759 1759 dodiff = False
1760 1760 else:
1761 1761 header.append('deleted file mode %s\n' %
1762 1762 gitmode[man1.flags(f)])
1763 1763 elif not to or util.binary(to):
1764 1764 # regular diffs cannot represent empty file deletion
1765 1765 losedatafn(f)
1766 1766 else:
1767 1767 oflag = man1.flags(f)
1768 1768 nflag = ctx2.flags(f)
1769 1769 binary = util.binary(to) or util.binary(tn)
1770 1770 if opts.git:
1771 1771 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1772 1772 if binary:
1773 1773 dodiff = 'binary'
1774 1774 elif binary or nflag != oflag:
1775 1775 losedatafn(f)
1776 1776
1777 1777 if dodiff:
1778 1778 if opts.git or revs:
1779 1779 header.insert(0, diffline(join(a), join(b), revs))
1780 1780 if dodiff == 'binary':
1781 1781 text = mdiff.b85diff(to, tn)
1782 1782 if text:
1783 1783 addindexmeta(header, [gitindex(to), gitindex(tn)])
1784 1784 else:
1785 1785 text = mdiff.unidiff(to, date1,
1786 1786 # ctx2 date may be dynamic
1787 1787 tn, util.datestr(ctx2.date()),
1788 1788 join(a), join(b), opts=opts)
1789 1789 if header and (text or len(header) > 1):
1790 1790 yield ''.join(header)
1791 1791 if text:
1792 1792 yield text
1793 1793
1794 1794 def diffstatsum(stats):
1795 1795 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1796 1796 for f, a, r, b in stats:
1797 1797 maxfile = max(maxfile, encoding.colwidth(f))
1798 1798 maxtotal = max(maxtotal, a + r)
1799 1799 addtotal += a
1800 1800 removetotal += r
1801 1801 binary = binary or b
1802 1802
1803 1803 return maxfile, maxtotal, addtotal, removetotal, binary
1804 1804
1805 1805 def diffstatdata(lines):
1806 1806 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1807 1807
1808 1808 results = []
1809 1809 filename, adds, removes, isbinary = None, 0, 0, False
1810 1810
1811 1811 def addresult():
1812 1812 if filename:
1813 1813 results.append((filename, adds, removes, isbinary))
1814 1814
1815 1815 for line in lines:
1816 1816 if line.startswith('diff'):
1817 1817 addresult()
1818 1818 # set numbers to 0 anyway when starting new file
1819 1819 adds, removes, isbinary = 0, 0, False
1820 1820 if line.startswith('diff --git'):
1821 1821 filename = gitre.search(line).group(1)
1822 1822 elif line.startswith('diff -r'):
1823 1823 # format: "diff -r ... -r ... filename"
1824 1824 filename = diffre.search(line).group(1)
1825 1825 elif line.startswith('+') and not line.startswith('+++ '):
1826 1826 adds += 1
1827 1827 elif line.startswith('-') and not line.startswith('--- '):
1828 1828 removes += 1
1829 1829 elif (line.startswith('GIT binary patch') or
1830 1830 line.startswith('Binary file')):
1831 1831 isbinary = True
1832 1832 addresult()
1833 1833 return results
1834 1834
1835 1835 def diffstat(lines, width=80, git=False):
1836 1836 output = []
1837 1837 stats = diffstatdata(lines)
1838 1838 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1839 1839
1840 1840 countwidth = len(str(maxtotal))
1841 1841 if hasbinary and countwidth < 3:
1842 1842 countwidth = 3
1843 1843 graphwidth = width - countwidth - maxname - 6
1844 1844 if graphwidth < 10:
1845 1845 graphwidth = 10
1846 1846
1847 1847 def scale(i):
1848 1848 if maxtotal <= graphwidth:
1849 1849 return i
1850 1850 # If diffstat runs out of room it doesn't print anything,
1851 1851 # which isn't very useful, so always print at least one + or -
1852 1852 # if there were at least some changes.
1853 1853 return max(i * graphwidth // maxtotal, int(bool(i)))
1854 1854
1855 1855 for filename, adds, removes, isbinary in stats:
1856 1856 if isbinary:
1857 1857 count = 'Bin'
1858 1858 else:
1859 1859 count = adds + removes
1860 1860 pluses = '+' * scale(adds)
1861 1861 minuses = '-' * scale(removes)
1862 1862 output.append(' %s%s | %*s %s%s\n' %
1863 1863 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1864 1864 countwidth, count, pluses, minuses))
1865 1865
1866 1866 if stats:
1867 1867 output.append(_(' %d files changed, %d insertions(+), '
1868 1868 '%d deletions(-)\n')
1869 1869 % (len(stats), totaladds, totalremoves))
1870 1870
1871 1871 return ''.join(output)
1872 1872
1873 1873 def diffstatui(*args, **kw):
1874 1874 '''like diffstat(), but yields 2-tuples of (output, label) for
1875 1875 ui.write()
1876 1876 '''
1877 1877
1878 1878 for line in diffstat(*args, **kw).splitlines():
1879 1879 if line and line[-1] in '+-':
1880 1880 name, graph = line.rsplit(' ', 1)
1881 1881 yield (name + ' ', '')
1882 1882 m = re.search(r'\++', graph)
1883 1883 if m:
1884 1884 yield (m.group(0), 'diffstat.inserted')
1885 1885 m = re.search(r'-+', graph)
1886 1886 if m:
1887 1887 yield (m.group(0), 'diffstat.deleted')
1888 1888 else:
1889 1889 yield (line, '')
1890 1890 yield ('\n', '')
@@ -1,538 +1,538 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import scmutil, util, parsers
10 10 import os, stat, errno
11 11
12 12 _sha = util.sha1
13 13
14 14 # This avoids a collision between a file named foo and a dir named
15 15 # foo.i or foo.d
16 16 def _encodedir(path):
17 17 '''
18 18 >>> _encodedir('data/foo.i')
19 19 'data/foo.i'
20 20 >>> _encodedir('data/foo.i/bla.i')
21 21 'data/foo.i.hg/bla.i'
22 22 >>> _encodedir('data/foo.i.hg/bla.i')
23 23 'data/foo.i.hg.hg/bla.i'
24 24 >>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
25 25 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
26 26 '''
27 27 return (path
28 28 .replace(".hg/", ".hg.hg/")
29 29 .replace(".i/", ".i.hg/")
30 30 .replace(".d/", ".d.hg/"))
31 31
32 32 encodedir = getattr(parsers, 'encodedir', _encodedir)
33 33
34 34 def decodedir(path):
35 35 '''
36 36 >>> decodedir('data/foo.i')
37 37 'data/foo.i'
38 38 >>> decodedir('data/foo.i.hg/bla.i')
39 39 'data/foo.i/bla.i'
40 40 >>> decodedir('data/foo.i.hg.hg/bla.i')
41 41 'data/foo.i.hg/bla.i'
42 42 '''
43 43 if ".hg/" not in path:
44 44 return path
45 45 return (path
46 46 .replace(".d.hg/", ".d/")
47 47 .replace(".i.hg/", ".i/")
48 48 .replace(".hg.hg/", ".hg/"))
49 49
50 50 def _buildencodefun():
51 51 '''
52 52 >>> enc, dec = _buildencodefun()
53 53
54 54 >>> enc('nothing/special.txt')
55 55 'nothing/special.txt'
56 56 >>> dec('nothing/special.txt')
57 57 'nothing/special.txt'
58 58
59 59 >>> enc('HELLO')
60 60 '_h_e_l_l_o'
61 61 >>> dec('_h_e_l_l_o')
62 62 'HELLO'
63 63
64 64 >>> enc('hello:world?')
65 65 'hello~3aworld~3f'
66 66 >>> dec('hello~3aworld~3f')
67 67 'hello:world?'
68 68
69 69 >>> enc('the\x07quick\xADshot')
70 70 'the~07quick~adshot'
71 71 >>> dec('the~07quick~adshot')
72 72 'the\\x07quick\\xadshot'
73 73 '''
74 74 e = '_'
75 75 winreserved = [ord(x) for x in '\\:*?"<>|']
76 76 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
77 77 for x in (range(32) + range(126, 256) + winreserved):
78 78 cmap[chr(x)] = "~%02x" % x
79 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
79 for x in range(ord("A"), ord("Z") + 1) + [ord(e)]:
80 80 cmap[chr(x)] = e + chr(x).lower()
81 81 dmap = {}
82 82 for k, v in cmap.iteritems():
83 83 dmap[v] = k
84 84 def decode(s):
85 85 i = 0
86 86 while i < len(s):
87 87 for l in xrange(1, 4):
88 88 try:
89 89 yield dmap[s[i:i + l]]
90 90 i += l
91 91 break
92 92 except KeyError:
93 93 pass
94 94 else:
95 95 raise KeyError
96 96 return (lambda s: ''.join([cmap[c] for c in s]),
97 97 lambda s: ''.join(list(decode(s))))
98 98
99 99 _encodefname, _decodefname = _buildencodefun()
100 100
101 101 def encodefilename(s):
102 102 '''
103 103 >>> encodefilename('foo.i/bar.d/bla.hg/hi:world?/HELLO')
104 104 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
105 105 '''
106 106 return _encodefname(encodedir(s))
107 107
108 108 def decodefilename(s):
109 109 '''
110 110 >>> decodefilename('foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
111 111 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
112 112 '''
113 113 return decodedir(_decodefname(s))
114 114
115 115 def _buildlowerencodefun():
116 116 '''
117 117 >>> f = _buildlowerencodefun()
118 118 >>> f('nothing/special.txt')
119 119 'nothing/special.txt'
120 120 >>> f('HELLO')
121 121 'hello'
122 122 >>> f('hello:world?')
123 123 'hello~3aworld~3f'
124 124 >>> f('the\x07quick\xADshot')
125 125 'the~07quick~adshot'
126 126 '''
127 127 winreserved = [ord(x) for x in '\\:*?"<>|']
128 128 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
129 129 for x in (range(32) + range(126, 256) + winreserved):
130 130 cmap[chr(x)] = "~%02x" % x
131 for x in range(ord("A"), ord("Z")+1):
131 for x in range(ord("A"), ord("Z") + 1):
132 132 cmap[chr(x)] = chr(x).lower()
133 133 return lambda s: "".join([cmap[c] for c in s])
134 134
135 135 lowerencode = _buildlowerencodefun()
136 136
137 137 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
138 138 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
139 139 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
140 140 def _auxencode(path, dotencode):
141 141 '''
142 142 Encodes filenames containing names reserved by Windows or which end in
143 143 period or space. Does not touch other single reserved characters c.
144 144 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
145 145 Additionally encodes space or period at the beginning, if dotencode is
146 146 True. Parameter path is assumed to be all lowercase.
147 147 A segment only needs encoding if a reserved name appears as a
148 148 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
149 149 doesn't need encoding.
150 150
151 151 >>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
152 152 >>> _auxencode(s.split('/'), True)
153 153 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
154 154 >>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
155 155 >>> _auxencode(s.split('/'), False)
156 156 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
157 157 >>> _auxencode(['foo. '], True)
158 158 ['foo.~20']
159 159 >>> _auxencode([' .foo'], True)
160 160 ['~20.foo']
161 161 '''
162 162 for i, n in enumerate(path):
163 163 if not n:
164 164 continue
165 165 if dotencode and n[0] in '. ':
166 166 n = "~%02x" % ord(n[0]) + n[1:]
167 167 path[i] = n
168 168 else:
169 169 l = n.find('.')
170 170 if l == -1:
171 171 l = len(n)
172 172 if ((l == 3 and n[:3] in _winres3) or
173 173 (l == 4 and n[3] <= '9' and n[3] >= '1'
174 174 and n[:3] in _winres4)):
175 175 # encode third letter ('aux' -> 'au~78')
176 176 ec = "~%02x" % ord(n[2])
177 177 n = n[0:2] + ec + n[3:]
178 178 path[i] = n
179 179 if n[-1] in '. ':
180 180 # encode last period or space ('foo...' -> 'foo..~2e')
181 181 path[i] = n[:-1] + "~%02x" % ord(n[-1])
182 182 return path
183 183
184 184 _maxstorepathlen = 120
185 185 _dirprefixlen = 8
186 186 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
187 187
188 188 def _hashencode(path, dotencode):
189 189 digest = _sha(path).hexdigest()
190 190 le = lowerencode(path).split('/')[1:]
191 191 parts = _auxencode(le, dotencode)
192 192 basename = parts[-1]
193 193 _root, ext = os.path.splitext(basename)
194 194 sdirs = []
195 195 sdirslen = 0
196 196 for p in parts[:-1]:
197 197 d = p[:_dirprefixlen]
198 198 if d[-1] in '. ':
199 199 # Windows can't access dirs ending in period or space
200 200 d = d[:-1] + '_'
201 201 if sdirslen == 0:
202 202 t = len(d)
203 203 else:
204 204 t = sdirslen + 1 + len(d)
205 205 if t > _maxshortdirslen:
206 206 break
207 207 sdirs.append(d)
208 208 sdirslen = t
209 209 dirs = '/'.join(sdirs)
210 210 if len(dirs) > 0:
211 211 dirs += '/'
212 212 res = 'dh/' + dirs + digest + ext
213 213 spaceleft = _maxstorepathlen - len(res)
214 214 if spaceleft > 0:
215 215 filler = basename[:spaceleft]
216 216 res = 'dh/' + dirs + filler + digest + ext
217 217 return res
218 218
219 219 def _hybridencode(path, dotencode):
220 220 '''encodes path with a length limit
221 221
222 222 Encodes all paths that begin with 'data/', according to the following.
223 223
224 224 Default encoding (reversible):
225 225
226 226 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
227 227 characters are encoded as '~xx', where xx is the two digit hex code
228 228 of the character (see encodefilename).
229 229 Relevant path components consisting of Windows reserved filenames are
230 230 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
231 231
232 232 Hashed encoding (not reversible):
233 233
234 234 If the default-encoded path is longer than _maxstorepathlen, a
235 235 non-reversible hybrid hashing of the path is done instead.
236 236 This encoding uses up to _dirprefixlen characters of all directory
237 237 levels of the lowerencoded path, but not more levels than can fit into
238 238 _maxshortdirslen.
239 239 Then follows the filler followed by the sha digest of the full path.
240 240 The filler is the beginning of the basename of the lowerencoded path
241 241 (the basename is everything after the last path separator). The filler
242 242 is as long as possible, filling in characters from the basename until
243 243 the encoded path has _maxstorepathlen characters (or all chars of the
244 244 basename have been taken).
245 245 The extension (e.g. '.i' or '.d') is preserved.
246 246
247 247 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
248 248 encoding was used.
249 249 '''
250 250 path = encodedir(path)
251 251 ef = _encodefname(path).split('/')
252 252 res = '/'.join(_auxencode(ef, dotencode))
253 253 if len(res) > _maxstorepathlen:
254 254 res = _hashencode(path, dotencode)
255 255 return res
256 256
257 257 def _pathencode(path):
258 258 if len(path) > _maxstorepathlen:
259 259 return None
260 260 ef = _encodefname(encodedir(path)).split('/')
261 261 res = '/'.join(_auxencode(ef, True))
262 262 if len(res) > _maxstorepathlen:
263 263 return None
264 264 return res
265 265
266 266 _pathencode = getattr(parsers, 'pathencode', _pathencode)
267 267
268 268 def _dothybridencode(f):
269 269 ef = _pathencode(f)
270 270 if ef is None:
271 271 return _hashencode(encodedir(f), True)
272 272 return ef
273 273
274 274 def _plainhybridencode(f):
275 275 return _hybridencode(f, False)
276 276
277 277 def _calcmode(vfs):
278 278 try:
279 279 # files in .hg/ will be created using this mode
280 280 mode = vfs.stat().st_mode
281 281 # avoid some useless chmods
282 282 if (0777 & ~util.umask) == (0777 & mode):
283 283 mode = None
284 284 except OSError:
285 285 mode = None
286 286 return mode
287 287
288 288 _data = ('data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
289 289 ' phaseroots obsstore')
290 290
291 291 class basicstore(object):
292 292 '''base class for local repository stores'''
293 293 def __init__(self, path, vfstype):
294 294 vfs = vfstype(path)
295 295 self.path = vfs.base
296 296 self.createmode = _calcmode(vfs)
297 297 vfs.createmode = self.createmode
298 298 self.rawvfs = vfs
299 299 self.vfs = scmutil.filtervfs(vfs, encodedir)
300 300 self.opener = self.vfs
301 301
302 302 def join(self, f):
303 303 return self.path + '/' + encodedir(f)
304 304
305 305 def _walk(self, relpath, recurse):
306 306 '''yields (unencoded, encoded, size)'''
307 307 path = self.path
308 308 if relpath:
309 309 path += '/' + relpath
310 310 striplen = len(self.path) + 1
311 311 l = []
312 312 if self.rawvfs.isdir(path):
313 313 visit = [path]
314 314 readdir = self.rawvfs.readdir
315 315 while visit:
316 316 p = visit.pop()
317 317 for f, kind, st in readdir(p, stat=True):
318 318 fp = p + '/' + f
319 319 if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
320 320 n = util.pconvert(fp[striplen:])
321 321 l.append((decodedir(n), n, st.st_size))
322 322 elif kind == stat.S_IFDIR and recurse:
323 323 visit.append(fp)
324 324 l.sort()
325 325 return l
326 326
327 327 def datafiles(self):
328 328 return self._walk('data', True)
329 329
330 330 def walk(self):
331 331 '''yields (unencoded, encoded, size)'''
332 332 # yield data files first
333 333 for x in self.datafiles():
334 334 yield x
335 335 # yield manifest before changelog
336 336 for x in reversed(self._walk('', False)):
337 337 yield x
338 338
339 339 def copylist(self):
340 340 return ['requires'] + _data.split()
341 341
342 342 def write(self):
343 343 pass
344 344
345 345 def __contains__(self, path):
346 346 '''Checks if the store contains path'''
347 347 path = "/".join(("data", path))
348 348 # file?
349 349 if os.path.exists(self.join(path + ".i")):
350 350 return True
351 351 # dir?
352 352 if not path.endswith("/"):
353 353 path = path + "/"
354 354 return os.path.exists(self.join(path))
355 355
356 356 class encodedstore(basicstore):
357 357 def __init__(self, path, vfstype):
358 358 vfs = vfstype(path + '/store')
359 359 self.path = vfs.base
360 360 self.createmode = _calcmode(vfs)
361 361 vfs.createmode = self.createmode
362 362 self.rawvfs = vfs
363 363 self.vfs = scmutil.filtervfs(vfs, encodefilename)
364 364 self.opener = self.vfs
365 365
366 366 def datafiles(self):
367 367 for a, b, size in self._walk('data', True):
368 368 try:
369 369 a = decodefilename(a)
370 370 except KeyError:
371 371 a = None
372 372 yield a, b, size
373 373
374 374 def join(self, f):
375 375 return self.path + '/' + encodefilename(f)
376 376
377 377 def copylist(self):
378 378 return (['requires', '00changelog.i'] +
379 379 ['store/' + f for f in _data.split()])
380 380
381 381 class fncache(object):
382 382 # the filename used to be partially encoded
383 383 # hence the encodedir/decodedir dance
384 384 def __init__(self, vfs):
385 385 self.vfs = vfs
386 386 self.entries = None
387 387 self._dirty = False
388 388
389 389 def _load(self):
390 390 '''fill the entries from the fncache file'''
391 391 self._dirty = False
392 392 try:
393 393 fp = self.vfs('fncache', mode='rb')
394 394 except IOError:
395 395 # skip nonexistent file
396 396 self.entries = set()
397 397 return
398 398 self.entries = set(decodedir(fp.read()).splitlines())
399 399 if '' in self.entries:
400 400 fp.seek(0)
401 401 for n, line in enumerate(fp):
402 402 if not line.rstrip('\n'):
403 403 t = _('invalid entry in fncache, line %s') % (n + 1)
404 404 raise util.Abort(t)
405 405 fp.close()
406 406
407 407 def _write(self, files, atomictemp):
408 408 fp = self.vfs('fncache', mode='wb', atomictemp=atomictemp)
409 409 if files:
410 410 fp.write(encodedir('\n'.join(files) + '\n'))
411 411 fp.close()
412 412 self._dirty = False
413 413
414 414 def rewrite(self, files):
415 415 self._write(files, False)
416 416 self.entries = set(files)
417 417
418 418 def write(self):
419 419 if self._dirty:
420 420 self._write(self.entries, True)
421 421
422 422 def add(self, fn):
423 423 if self.entries is None:
424 424 self._load()
425 425 if fn not in self.entries:
426 426 self._dirty = True
427 427 self.entries.add(fn)
428 428
429 429 def __contains__(self, fn):
430 430 if self.entries is None:
431 431 self._load()
432 432 return fn in self.entries
433 433
434 434 def __iter__(self):
435 435 if self.entries is None:
436 436 self._load()
437 437 return iter(self.entries)
438 438
439 439 class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs):
440 440 def __init__(self, vfs, fnc, encode):
441 441 scmutil.auditvfs.__init__(self, vfs)
442 442 self.fncache = fnc
443 443 self.encode = encode
444 444
445 445 def __call__(self, path, mode='r', *args, **kw):
446 446 if mode not in ('r', 'rb') and path.startswith('data/'):
447 447 self.fncache.add(path)
448 448 return self.vfs(self.encode(path), mode, *args, **kw)
449 449
450 450 def join(self, path):
451 451 if path:
452 452 return self.vfs.join(self.encode(path))
453 453 else:
454 454 return self.vfs.join(path)
455 455
456 456 class fncachestore(basicstore):
457 457 def __init__(self, path, vfstype, dotencode):
458 458 if dotencode:
459 459 encode = _dothybridencode
460 460 else:
461 461 encode = _plainhybridencode
462 462 self.encode = encode
463 463 vfs = vfstype(path + '/store')
464 464 self.path = vfs.base
465 465 self.pathsep = self.path + '/'
466 466 self.createmode = _calcmode(vfs)
467 467 vfs.createmode = self.createmode
468 468 self.rawvfs = vfs
469 469 fnc = fncache(vfs)
470 470 self.fncache = fnc
471 471 self.vfs = _fncachevfs(vfs, fnc, encode)
472 472 self.opener = self.vfs
473 473
474 474 def join(self, f):
475 475 return self.pathsep + self.encode(f)
476 476
477 477 def getsize(self, path):
478 478 return self.rawvfs.stat(path).st_size
479 479
480 480 def datafiles(self):
481 481 rewrite = False
482 482 existing = []
483 483 for f in sorted(self.fncache):
484 484 ef = self.encode(f)
485 485 try:
486 486 yield f, ef, self.getsize(ef)
487 487 existing.append(f)
488 488 except OSError, err:
489 489 if err.errno != errno.ENOENT:
490 490 raise
491 491 # nonexistent entry
492 492 rewrite = True
493 493 if rewrite:
494 494 # rewrite fncache to remove nonexistent entries
495 495 # (may be caused by rollback / strip)
496 496 self.fncache.rewrite(existing)
497 497
498 498 def copylist(self):
499 499 d = ('data dh fncache phaseroots obsstore'
500 500 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
501 501 return (['requires', '00changelog.i'] +
502 502 ['store/' + f for f in d.split()])
503 503
504 504 def write(self):
505 505 self.fncache.write()
506 506
507 507 def _exists(self, f):
508 508 ef = self.encode(f)
509 509 try:
510 510 self.getsize(ef)
511 511 return True
512 512 except OSError, err:
513 513 if err.errno != errno.ENOENT:
514 514 raise
515 515 # nonexistent entry
516 516 return False
517 517
518 518 def __contains__(self, path):
519 519 '''Checks if the store contains path'''
520 520 path = "/".join(("data", path))
521 521 # check for files (exact match)
522 522 e = path + '.i'
523 523 if e in self.fncache and self._exists(e):
524 524 return True
525 525 # now check for directories (prefix match)
526 526 if not path.endswith('/'):
527 527 path += '/'
528 528 for e in self.fncache:
529 529 if e.startswith(path) and self._exists(e):
530 530 return True
531 531 return False
532 532
533 533 def store(requirements, path, vfstype):
534 534 if 'store' in requirements:
535 535 if 'fncache' in requirements:
536 536 return fncachestore(path, vfstype, 'dotencode' in requirements)
537 537 return encodedstore(path, vfstype)
538 538 return basicstore(path, vfstype)
@@ -1,761 +1,761 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import errno, getpass, os, socket, sys, tempfile, traceback
10 10 import config, scmutil, util, error, formatter
11 11
12 12 class ui(object):
def __init__(self, src=None):
    """Create a ui; when src (another ui) is given, clone its state."""
    self._buffers = []  # stack of capture buffers for pushbuffer/popbuffer
    self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
    self._reportuntrusted = True
    self._ocfg = config.config() # overlay
    self._tcfg = config.config() # trusted
    self._ucfg = config.config() # untrusted
    self._trustusers = set()
    self._trustgroups = set()
    self.callhooks = True

    if src:
        # clone: share the streams and environ, copy config/trust state
        self.fout = src.fout
        self.ferr = src.ferr
        self.fin = src.fin

        self._tcfg = src._tcfg.copy()
        self._ucfg = src._ucfg.copy()
        self._ocfg = src._ocfg.copy()
        self._trustusers = src._trustusers.copy()
        self._trustgroups = src._trustgroups.copy()
        self.environ = src.environ
        self.callhooks = src.callhooks
        self.fixconfig()
    else:
        self.fout = sys.stdout
        self.ferr = sys.stderr
        self.fin = sys.stdin

        # shared read-only environment
        self.environ = os.environ
        # we always trust global config files
        for f in scmutil.rcpath():
            self.readconfig(f, trust=True)
47 47
def copy(self):
    """Return a new ui cloned from this one (see __init__ src handling)."""
    return self.__class__(self)

def formatter(self, topic, opts):
    # Delegate to the formatter module to build an output formatter
    # for the given command topic.
    return formatter.formatter(self, topic, opts)

def _trusted(self, fp, f):
    """Is config file f (open file object fp) from a trusted source?

    Trusted when owned by us, when '*' is in the trusted users/groups,
    or when the file's owner/group is explicitly trusted.
    """
    st = util.fstat(fp)
    if util.isowner(st):
        return True

    tusers, tgroups = self._trustusers, self._trustgroups
    if '*' in tusers or '*' in tgroups:
        # wildcard: everyone is trusted
        return True

    user = util.username(st.st_uid)
    group = util.groupname(st.st_gid)
    if user in tusers or group in tgroups or user == util.username():
        return True

    if self._reportuntrusted:
        self.warn(_('not trusting file %s from untrusted '
                    'user %s, group %s\n') % (f, user, group))
    return False
72 72
def readconfig(self, filename, root=None, trust=False,
               sections=None, remap=None):
    """Read filename into the configuration, respecting trust rules.

    root: directory relative paths are resolved against (default: ~)
    trust: treat the file as trusted regardless of ownership
    sections: only read these sections; a missing file then raises
    remap: section-name remapping passed through to the config parser
    """
    try:
        fp = open(filename)
    except IOError:
        if not sections: # ignore unless we were looking for something
            return
        raise

    cfg = config.config()
    # an explicit section request or trust flag bypasses the ownership
    # check in _trusted()
    trusted = sections or trust or self._trusted(fp, filename)

    try:
        cfg.read(filename, fp, sections=sections, remap=remap)
        fp.close()
    except error.ConfigError, inst:
        if trusted:
            raise
        self.warn(_("ignored: %s\n") % str(inst))

    if self.plain():
        # plain mode: drop options that change behavior or output
        for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
                  'logtemplate', 'style',
                  'traceback', 'verbose'):
            if k in cfg['ui']:
                del cfg['ui'][k]
        for k, v in cfg.items('defaults'):
            del cfg['defaults'][k]
        # Don't remove aliases from the configuration if in the exceptionlist
        if self.plain('alias'):
            for k, v in cfg.items('alias'):
                del cfg['alias'][k]

    if trusted:
        self._tcfg.update(cfg)
        self._tcfg.update(self._ocfg)
    # the untrusted view always receives the data; trust only gates _tcfg
    self._ucfg.update(cfg)
    self._ucfg.update(self._ocfg)

    if root is None:
        root = os.path.expanduser('~')
    self.fixconfig(root=root)
115 115
def fixconfig(self, root=None, section=None):
    """Post-process configuration after it changed.

    section limits the work to one area ('paths', 'ui' or 'trusted');
    None reprocesses everything.
    """
    if section in (None, 'paths'):
        # expand vars and ~
        # translate paths relative to root (or home) into absolute paths
        root = root or os.getcwd()
        for c in self._tcfg, self._ucfg, self._ocfg:
            for n, p in c.items('paths'):
                if not p:
                    continue
                if '%%' in p:
                    self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
                              % (n, p, self.configsource('paths', n)))
                    p = p.replace('%%', '%')
                p = util.expandpath(p)
                if not util.hasscheme(p) and not os.path.isabs(p):
                    p = os.path.normpath(os.path.join(root, p))
                c.set("paths", n, p)

    if section in (None, 'ui'):
        # update ui options; debug implies verbose, and conflicting
        # verbose+quiet cancel each other out
        self.debugflag = self.configbool('ui', 'debug')
        self.verbose = self.debugflag or self.configbool('ui', 'verbose')
        self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
        if self.verbose and self.quiet:
            self.quiet = self.verbose = False
        self._reportuntrusted = self.debugflag or self.configbool("ui",
            "report_untrusted", True)
        self.tracebackflag = self.configbool('ui', 'traceback', False)

    if section in (None, 'trusted'):
        # update trust information
        self._trustusers.update(self.configlist('trusted', 'users'))
        self._trustgroups.update(self.configlist('trusted', 'groups'))
149 149
150 150 def backupconfig(self, section, item):
151 151 return (self._ocfg.backup(section, item),
152 152 self._tcfg.backup(section, item),
153 153 self._ucfg.backup(section, item),)
154 154 def restoreconfig(self, data):
155 155 self._ocfg.restore(data[0])
156 156 self._tcfg.restore(data[1])
157 157 self._ucfg.restore(data[2])
158 158
159 159 def setconfig(self, section, name, value, overlay=True):
160 160 if overlay:
161 161 self._ocfg.set(section, name, value)
162 162 self._tcfg.set(section, name, value)
163 163 self._ucfg.set(section, name, value)
164 164 self.fixconfig(section=section)
165 165
166 166 def _data(self, untrusted):
167 167 return untrusted and self._ucfg or self._tcfg
168 168
169 169 def configsource(self, section, name, untrusted=False):
170 170 return self._data(untrusted).source(section, name) or 'none'
171 171
def config(self, section, name, default=None, untrusted=False):
    """Return the configuration value for section.name.

    name may be a list of alternative names; the first one that has a
    value set wins.  Returns default when none of them is set.
    """
    if isinstance(name, list):
        alternates = name
    else:
        alternates = [name]

    for n in alternates:
        # bug fix: look up each alternate 'n', not the original 'name'
        # (which is the whole list when alternates were passed, so the
        # alternates after the first were never consulted)
        value = self._data(untrusted).get(section, n, None)
        if value is not None:
            name = n
            break
    else:
        value = default

    if self.debugflag and not untrusted and self._reportuntrusted:
        uvalue = self._ucfg.get(section, name)
        if uvalue is not None and uvalue != value:
            self.debug("ignoring untrusted configuration option "
                       "%s.%s = %s\n" % (section, name, uvalue))
    return value
192 192
def configpath(self, section, name, default=None, untrusted=False):
    'get a path config item, expanded relative to repo root or config file'
    value = self.config(section, name, default, untrusted)
    if value is None:
        return None
    if not os.path.isabs(value) or "://" not in value:
        src = self.configsource(section, name, untrusted)
        if ':' in src:
            # src looks like "<file>:<line>"; resolve relative to <file>
            base = os.path.dirname(src.rsplit(':')[0])
            value = os.path.join(base, os.path.expanduser(value))
    return value
204 204
def configbool(self, section, name, default=False, untrusted=False):
    """parse a configuration element as a boolean

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'true', 'yes')
    >>> u.configbool(s, 'true')
    True
    >>> u.setconfig(s, 'false', 'no')
    >>> u.configbool(s, 'false')
    False
    >>> u.configbool(s, 'unknown')
    False
    >>> u.configbool(s, 'unknown', True)
    True
    >>> u.setconfig(s, 'invalid', 'somevalue')
    >>> u.configbool(s, 'invalid')
    Traceback (most recent call last):
    ...
    ConfigError: foo.invalid is not a boolean ('somevalue')
    """

    raw = self.config(section, name, None, untrusted)
    if raw is None:
        return default
    if isinstance(raw, bool):
        # already a bool (e.g. set programmatically)
        return raw
    parsed = util.parsebool(raw)
    if parsed is None:
        raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
                                % (section, name, raw))
    return parsed
236 236
def configint(self, section, name, default=None, untrusted=False):
    """parse a configuration element as an integer

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'int1', '42')
    >>> u.configint(s, 'int1')
    42
    >>> u.setconfig(s, 'int2', '-42')
    >>> u.configint(s, 'int2')
    -42
    >>> u.configint(s, 'unknown', 7)
    7
    >>> u.setconfig(s, 'invalid', 'somevalue')
    >>> u.configint(s, 'invalid')
    Traceback (most recent call last):
    ...
    ConfigError: foo.invalid is not an integer ('somevalue')
    """

    raw = self.config(section, name, None, untrusted)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(_("%s.%s is not an integer ('%s')")
                                % (section, name, raw))
264 264
def configlist(self, section, name, default=None, untrusted=False):
    """parse a configuration element as a list of comma/space separated
    strings

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
    >>> u.configlist(s, 'list1')
    ['this', 'is', 'a small', 'test']
    """

    # The two _parse_* helpers form a small state machine: each returns
    # (next-state-function or None when done, parts-so-far, next offset).
    def _parse_plain(parts, s, offset):
        # consume separators (whitespace/comma), then one plain character
        whitespace = False
        while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
            whitespace = True
            offset += 1
        if offset >= len(s):
            return None, parts, offset
        if whitespace:
            parts.append('')
        if s[offset] == '"' and not parts[-1]:
            # opening quote at the start of an item
            return _parse_quote, parts, offset + 1
        elif s[offset] == '"' and parts[-1][-1] == '\\':
            # backslash-escaped quote inside a plain item
            parts[-1] = parts[-1][:-1] + s[offset]
            return _parse_plain, parts, offset + 1
        parts[-1] += s[offset]
        return _parse_plain, parts, offset + 1

    def _parse_quote(parts, s, offset):
        if offset < len(s) and s[offset] == '"': # ""
            parts.append('')
            offset += 1
            while offset < len(s) and (s[offset].isspace() or
                    s[offset] == ','):
                offset += 1
            return _parse_plain, parts, offset

        # accumulate until the closing quote, honoring \" escapes
        while offset < len(s) and s[offset] != '"':
            if (s[offset] == '\\' and offset + 1 < len(s)
                    and s[offset + 1] == '"'):
                offset += 1
                parts[-1] += '"'
            else:
                parts[-1] += s[offset]
            offset += 1

        if offset >= len(s):
            # unterminated quote: re-parse the collected text as plain
            # items, keeping the literal '"' at the front
            real_parts = _configlist(parts[-1])
            if not real_parts:
                parts[-1] = '"'
            else:
                real_parts[0] = '"' + real_parts[0]
                parts = parts[:-1]
                parts.extend(real_parts)
            return None, parts, offset

        offset += 1
        while offset < len(s) and s[offset] in [' ', ',']:
            offset += 1

        if offset < len(s):
            if offset + 1 == len(s) and s[offset] == '"':
                # trailing lone quote attaches to the current item
                parts[-1] += '"'
                offset += 1
            else:
                parts.append('')
        else:
            return None, parts, offset

        return _parse_plain, parts, offset

    def _configlist(s):
        # drive the state machine over the raw string
        s = s.rstrip(' ,')
        if not s:
            return []
        parser, parts, offset = _parse_plain, [''], 0
        while parser:
            parser, parts, offset = parser(parts, s, offset)
        return parts

    result = self.config(section, name, untrusted=untrusted)
    if result is None:
        result = default or []
    if isinstance(result, basestring):
        result = _configlist(result.lstrip(' ,\n'))
        if result is None:
            result = default or []
    return result
352 352
def has_section(self, section, untrusted=False):
    '''tell whether section exists in config.'''
    return section in self._data(untrusted)

def configitems(self, section, untrusted=False):
    """Return (name, value) pairs for section; in debug mode, report
    untrusted options that differ from the trusted view."""
    items = self._data(untrusted).items(section)
    if self.debugflag and not untrusted and self._reportuntrusted:
        for key, uvalue in self._ucfg.items(section):
            if self._tcfg.get(section, key) != uvalue:
                self.debug("ignoring untrusted configuration option "
                           "%s.%s = %s\n" % (section, key, uvalue))
    return items

def walkconfig(self, untrusted=False):
    """Yield (section, name, value) for every configured item."""
    cfg = self._data(untrusted)
    for section in cfg.sections():
        for name, value in self.configitems(section, untrusted):
            yield section, name, value
371 371
def plain(self, feature=None):
    '''is plain mode active?

    Plain mode makes output stable, reproducible and suitable for
    scripts by ignoring configuration that changes Mercurial's behavior
    or output.  It is triggered only by the `HGPLAIN' or
    `HGPLAINEXCEPT' environment variables.

    Returns False if HGPLAIN is not set, or if feature is listed in
    HGPLAINEXCEPT; True otherwise.
    '''
    env = os.environ
    if 'HGPLAIN' not in env and 'HGPLAINEXCEPT' not in env:
        return False
    exceptions = env.get('HGPLAINEXCEPT', '').strip().split(',')
    if feature and exceptions:
        return feature not in exceptions
    return True
393 393
def username(self):
    """Return default username to be used in commits.

    Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
    and stop searching if one of these is set.
    If not found and ui.askusername is True, ask the user, else use
    ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".

    Raises util.Abort when no username can be determined or the
    configured one contains a newline.
    """
    user = os.environ.get("HGUSER")
    if user is None:
        user = self.config("ui", "username")
        if user is not None:
            user = os.path.expandvars(user)
    if user is None:
        user = os.environ.get("EMAIL")
    if user is None and self.configbool("ui", "askusername"):
        user = self.prompt(_("enter a commit username:"), default=None)
    if user is None and not self.interactive():
        try:
            user = '%s@%s' % (util.getuser(), socket.getfqdn())
            self.warn(_("no username found, using '%s' instead\n") % user)
        except KeyError:
            # getuser() may fail when the uid has no passwd entry
            pass
    if not user:
        raise util.Abort(_('no username supplied (see "hg help config")'))
    if "\n" in user:
        raise util.Abort(_("username %s contains a newline\n") % repr(user))
    return user
422 422
def shortuser(self, user):
    """Return a short representation of a user name or email address."""
    if self.verbose:
        return user
    return util.shortuser(user)

def expandpath(self, loc, default=None):
    """Return repository location relative to cwd or from [paths]"""
    if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')):
        return loc

    path = self.config('paths', loc)
    if not path and default is not None:
        path = self.config('paths', default)
    return path or loc

def pushbuffer(self):
    """Redirect subsequent write() output into a fresh capture buffer."""
    self._buffers.append([])

def popbuffer(self, labeled=False):
    '''pop the last buffer and return the buffered output

    If labeled is True, any labels associated with buffered output
    will be handled.  The base class ignores labels, but extensions
    and GUI tools may return styled output.  Do not set labeled when
    the output is captured to be parsed or processed.
    '''
    return "".join(self._buffers.pop())
453 453
def write(self, *args, **opts):
    '''write args to output

    By default this writes to the innermost pushbuffer() buffer when
    one is active, otherwise to stdout.  Extensions and GUI tools may
    override this method, write_err(), popbuffer() and label() to
    style output.

    The optional keyword argument "label" is a string of space
    separated label names of the form "topic.type" (e.g. ui.debug()
    issues "ui.debug"; status issues "status.modified" for modified
    files).
    '''
    if self._buffers:
        self._buffers[-1].extend(str(a) for a in args)
    else:
        for a in args:
            self.fout.write(str(a))
476 476
def write_err(self, *args, **opts):
    """Write args to stderr, bypassing any pushbuffer() capture."""
    try:
        # flush stdout first so interleaved out/err keeps its order
        if not getattr(self.fout, 'closed', False):
            self.fout.flush()
        for a in args:
            self.ferr.write(str(a))
        # stderr may be buffered under win32 when redirected to files,
        # including stdout.
        if not getattr(self.ferr, 'closed', False):
            self.ferr.flush()
    except IOError, inst:
        # writing error output is best-effort: ignore broken pipes and
        # closed/unwritable streams
        if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
            raise
490 490
def flush(self):
    """Flush stdout then stderr, swallowing expected stream errors."""
    for stream in (self.fout, self.ferr):
        try:
            stream.flush()
        except (IOError, ValueError):
            pass
496 496
497 497 def _isatty(self, fh):
498 498 if self.configbool('ui', 'nontty', False):
499 499 return False
500 500 return util.isatty(fh)
501 501
502 502 def interactive(self):
503 503 '''is interactive input allowed?
504 504
505 505 An interactive session is a session where input can be reasonably read
506 506 from `sys.stdin'. If this function returns false, any attempt to read
507 507 from stdin should fail with an error, unless a sensible default has been
508 508 specified.
509 509
510 510 Interactiveness is triggered by the value of the `ui.interactive'
511 511 configuration variable or - if it is unset - when `sys.stdin' points
512 512 to a terminal device.
513 513
514 514 This function refers to input only; for output, see `ui.formatted()'.
515 515 '''
516 516 i = self.configbool("ui", "interactive", None)
517 517 if i is None:
518 518 # some environments replace stdin without implementing isatty
519 519 # usually those are non-interactive
520 520 return self._isatty(self.fin)
521 521
522 522 return i
523 523
def termwidth(self):
    '''how wide is the terminal in columns?
    '''
    columns = os.environ.get('COLUMNS')
    if columns is not None:
        try:
            return int(columns)
        except ValueError:
            # non-numeric COLUMNS: fall through to platform detection
            pass
    return util.termwidth()
533 533
def formatted(self):
    '''should formatted output be used?

    Formatting output to suit the medium (truncating long lines,
    colorizing messages) is usually wanted on a terminal but not when
    piping into other utilities such as `grep'.

    Controlled by the `ui.formatted' configuration variable; when
    unset, falls back to whether `sys.stdout' points to a terminal.
    Note that `ui.formatted' is an implementation detail not intended
    for use outside Mercurial or its extensions.

    This function refers to output only; for input, see
    `ui.interactive()'.  It always returns false in plain mode, see
    `ui.plain()'.
    '''
    if self.plain():
        return False

    configured = self.configbool("ui", "formatted", None)
    if configured is not None:
        return configured
    # some environments replace stdout without implementing isatty;
    # usually those are non-interactive
    return self._isatty(self.fout)
561 561
def _readline(self, prompt=''):
    # Read one line of input, showing prompt first; enables readline
    # command-line editing when stdin is a terminal.
    if self._isatty(self.fin):
        try:
            # magically add command line editing support, where
            # available
            import readline
            # force demandimport to really load the module
            readline.read_history_file
        # windows sometimes raises something other than ImportError
        except Exception:
            pass

    # call write() so output goes through subclassed implementation
    # e.g. color extension on Windows
    self.write(prompt)

    # instead of trying to emulate raw_input, swap (self.fin,
    # self.fout) with (sys.stdin, sys.stdout)
    oldin = sys.stdin
    oldout = sys.stdout
    sys.stdin = self.fin
    sys.stdout = self.fout
    line = raw_input(' ')
    sys.stdin = oldin
    sys.stdout = oldout

    # When stdin is in binary mode on Windows, it can cause
    # raw_input() to emit an extra trailing carriage return
    if os.linesep == '\r\n' and line and line[-1] == '\r':
        line = line[:-1]
    return line
593 593
def prompt(self, msg, default="y"):
    """Prompt user with msg, read response.
    If ui is not interactive, the default is returned.
    """
    if not self.interactive():
        self.write(msg, ' ', default, "\n")
        return default
    try:
        response = self._readline(self.label(msg, 'ui.prompt'))
    except EOFError:
        raise util.Abort(_('response expected'))
    return response or default
608 608
def promptchoice(self, msg, choices, default=0):
    """Prompt user with msg, read response, and ensure it matches
    one of the provided choices. The index of the choice is returned.
    choices is a sequence of acceptable responses with the format:
    ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
    If ui is not interactive, the default is returned.
    """
    # the response key is the character following '&' in each choice
    resps = [c[c.index('&') + 1].lower() for c in choices]
    while True:
        answer = self.prompt(msg, resps[default]).lower()
        if answer in resps:
            return resps.index(answer)
        self.write(_("unrecognized response\n"))
622 622
def getpass(self, prompt=None, default=None):
    """Read a password without echoing; return default if ui is not
    interactive."""
    if self.interactive():
        try:
            return getpass.getpass(prompt or _('password: '))
        except EOFError:
            raise util.Abort(_('response expected'))
    return default
def status(self, *msg, **opts):
    '''write status message to output (if ui.quiet is False)

    This adds an output label of "ui.status".
    '''
    if self.quiet:
        return
    opts['label'] = opts.get('label', '') + ' ui.status'
    self.write(*msg, **opts)

def warn(self, *msg, **opts):
    '''write warning message to output (stderr)

    This adds an output label of "ui.warning".
    '''
    opts['label'] = opts.get('label', '') + ' ui.warning'
    self.write_err(*msg, **opts)

def note(self, *msg, **opts):
    '''write note to output (if ui.verbose is True)

    This adds an output label of "ui.note".
    '''
    if not self.verbose:
        return
    opts['label'] = opts.get('label', '') + ' ui.note'
    self.write(*msg, **opts)

def debug(self, *msg, **opts):
    '''write debug message to output (if ui.debugflag is True)

    This adds an output label of "ui.debug".
    '''
    if not self.debugflag:
        return
    opts['label'] = opts.get('label', '') + ' ui.debug'
    self.write(*msg, **opts)
def edit(self, text, user):
    """Open text in the user's editor; return the edited content.

    user is exported to the editor process as $HGUSER.  Raises
    util.Abort (via util.system onerr) when the editor exits non-zero.
    """
    (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                  text=True)
    try:
        f = os.fdopen(fd, "w")
        f.write(text)
        f.close()

        editor = self.geteditor()

        util.system("%s \"%s\"" % (editor, name),
                    environ={'HGUSER': user},
                    onerr=util.Abort, errprefix=_("edit failed"),
                    out=self.fout)

        f = open(name)
        t = f.read()
        f.close()
    finally:
        # always remove the temp file, even when the editor failed
        os.unlink(name)

    return t
683 683
def traceback(self, exc=None):
    '''print exception traceback if traceback printing enabled.
    only to call in exception handler. returns true if traceback
    printed.'''
    if self.tracebackflag:
        if not exc:
            # no explicit exc_info: use the currently handled exception
            traceback.print_exc(file=self.ferr)
        else:
            traceback.print_exception(exc[0], exc[1], exc[2],
                                      file=self.ferr)
    return self.tracebackflag
695 695
def geteditor(self):
    '''return editor to use'''
    if sys.platform == 'plan9':
        # vi is the MIPS instruction simulator on Plan 9. We
        # instead default to E to plumb commit messages to
        # avoid confusion.
        fallback = 'E'
    else:
        fallback = 'vi'
    return (os.environ.get("HGEDITOR") or
            self.config("ui", "editor") or
            os.environ.get("VISUAL") or
            os.environ.get("EDITOR", fallback))
709 709
def progress(self, topic, pos, item="", unit="", total=None):
    '''show a progress message

    With stock hg this is just a debug message (hidden by default);
    extensions and GUI tools may make it visible.  'topic' is the
    current operation, 'item' a non-numeric marker of the current
    position (e.g. the file being processed), 'pos' the numeric
    position (revision, bytes, ...), 'unit' a corresponding label and
    'total' the highest expected pos.

    Multiple nested topics may be active at a time.  Mark a topic
    closed by calling with pos=None.
    '''
    if pos is None or not self.debugflag:
        return

    unit_part = ' ' + unit if unit else unit
    item_part = ' ' + item if item else item

    if total:
        pct = 100.0 * pos / total
        self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
                   % (topic, item_part, pos, total, unit_part, pct))
    else:
        self.debug('%s:%s %s%s\n' % (topic, item_part, pos, unit_part))
741 741
def log(self, service, message):
    '''hook for logging facility extensions

    service should be a readily-identifiable subsystem, which will
    allow filtering.
    message should be a newline-terminated string to log.
    '''
    # base implementation is deliberately a no-op; logging extensions
    # override this
    pass

def label(self, msg, label):
    '''style msg based on supplied label

    Like ui.write(), this just returns msg unchanged, but extensions
    and GUI tools can override it to allow styling output without
    writing it.

    ui.write(s, 'label') is equivalent to
    ui.write(ui.label(s, 'label')).
    '''
    return msg
General Comments 0
You need to be logged in to leave comments. Login now