##// END OF EJS Templates
i18n: fix "% inside _()" problems...
FUJIWARA Katsunori -
r20869:9658a799 stable
parent child Browse files
Show More
@@ -1,553 +1,553 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # check-code - a style and portability checker for Mercurial
4 4 #
5 5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """style and portability checker for Mercurial
11 11
12 12 when a rule triggers wrong, do one of the following (prefer one from top):
13 13 * do the work-around the rule suggests
14 14 * doublecheck that it is a false match
15 15 * improve the rule pattern
16 16 * add an ignore pattern to the rule (3rd arg) which matches your good line
17 17 (you can append a short comment and match this, like: #re-raises, # no-py24)
18 18 * change the pattern to a warning and list the exception in test-check-code-hg
19 19 * ONLY use no--check-code for skipping entire files from external sources
20 20 """
21 21
22 22 import re, glob, os, sys
23 23 import keyword
24 24 import optparse
25 25 try:
26 26 import re2
27 27 except ImportError:
28 28 re2 = None
29 29
def compilere(pat, multiline=False):
    """Compile a regex, preferring the faster re2 engine when available.

    With multiline=True the inline (?m) flag is prepended.  Patterns
    that re2 cannot handle fall back silently to the stdlib re module.
    """
    source = pat
    if multiline:
        source = '(?m)' + source
    if not re2:
        return re.compile(source)
    try:
        return re2.compile(source)
    except re2.error:
        return re.compile(source)
39 39
def repquote(m):
    """Blank out a quoted string but keep its layout for later checks.

    Each character of the string body is replaced by a class marker:
    'x' for letters, 'n' for digits, 'p'/'q' for '.'/':'; spaces and
    newlines pass through so positions and line structure survive.
    Used as a re.sub callback; expects 'quote' and 'text' named groups.
    """
    fromc = '.:'
    tochr = 'pq'
    def encodechr(i):
        # build one table entry for ordinal i
        if i > 255:
            return 'u'
        c = chr(i)
        if c in ' \n':
            return c
        if c.isalpha():
            return 'x'
        if c.isdigit():
            return 'n'
        try:
            # NOTE(review): str.find returns -1 for characters not in
            # fromc, so this resolves to tochr[-1] ('q') and never
            # raises; the 'o' fallback below looks unreachable -- confirm
            # before relying on 'o' in any checker pattern.
            return tochr[fromc.find(c)]
        except (ValueError, IndexError):
            return 'o'
    t = m.group('text')
    # 256-entry translation table covering every byte value
    tt = ''.join(encodechr(i) for i in xrange(256))
    t = t.translate(tt)
    return m.group('quote') + t + m.group('quote')
61 61
def reppython(m):
    """Blank out python comments and strings matched by pyfilters.

    A comment becomes a run of '#' of the same visible width (trailing
    whitespace kept as-is); string literals are delegated to repquote.
    """
    comment = m.group('comment')
    if not comment:
        return repquote(m)
    width = len(comment.rstrip())
    return "#" * width + comment[width:]
68 68
def repcomment(m):
    """Replace the body of a '#' comment with '#' padding of equal width."""
    leading, body = m.group(1), m.group(2)
    return leading + "#" * len(body)
71 71
def repccomment(m):
    """Blank a C comment body, preserving newlines and the indent column."""
    blanked = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return m.group(1) + blanked + "*/"
75 75
def repcallspaces(m):
    """Collapse continuation-line indentation inside a call's parentheses."""
    squeezed = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + squeezed
79 79
def repinclude(m):
    """Normalize the target of an '#include <...>' to a fixed placeholder."""
    return m.group(1) + "<foo>"
82 82
def rephere(m):
    """Blank the body of a shell here-document, keeping whitespace layout."""
    blanked = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + blanked
86 86
87 87
88 88 testpats = [
89 89 [
90 90 (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
91 91 (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
92 92 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
93 93 (r'(?<!hg )grep.*-a', "don't use 'grep -a', use in-line python"),
94 94 (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
95 95 (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
96 96 (r'echo -n', "don't use 'echo -n', use printf"),
97 97 (r'(^| )wc[^|]*$\n(?!.*\(re\))', "filter wc output"),
98 98 (r'head -c', "don't use 'head -c', use 'dd'"),
99 99 (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
100 100 (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
101 101 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
102 102 (r'printf.*[^\\]\\([1-9]|0\d)', "don't use 'printf \NNN', use Python"),
103 103 (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
104 104 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
105 105 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
106 106 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
107 107 "use egrep for extended grep syntax"),
108 108 (r'/bin/', "don't use explicit paths for tools"),
109 109 (r'[^\n]\Z', "no trailing newline"),
110 110 (r'export.*=', "don't export and assign at once"),
111 111 (r'^source\b', "don't use 'source', use '.'"),
112 112 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
113 113 (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
114 114 (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
115 115 (r'^stop\(\)', "don't use 'stop' as a shell function name"),
116 116 (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
117 117 (r'^alias\b.*=', "don't use alias, use a function"),
118 118 (r'if\s*!', "don't use '!' to negate exit status"),
119 119 (r'/dev/u?random', "don't use entropy, use /dev/zero"),
120 120 (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
121 121 (r'^( *)\t', "don't use tabs to indent"),
122 122 (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
123 123 "put a backslash-escaped newline after sed 'i' command"),
124 124 ],
125 125 # warnings
126 126 [
127 127 (r'^function', "don't use 'function', use old style"),
128 128 (r'^diff.*-\w*N', "don't use 'diff -N'"),
129 129 (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
130 130 (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
131 131 (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
132 132 ]
133 133 ]
134 134
135 135 testfilters = [
136 136 (r"( *)(#([^\n]*\S)?)", repcomment),
137 137 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
138 138 ]
139 139
140 140 winglobmsg = "use (glob) to match Windows paths too"
141 141 uprefix = r"^ \$ "
142 142 utestpats = [
143 143 [
144 144 (r'^(\S.*|| [$>] .*)[ \t]\n', "trailing whitespace on non-output"),
145 145 (uprefix + r'.*\|\s*sed[^|>\n]*\n',
146 146 "use regex test output patterns instead of sed"),
147 147 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
148 148 (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
149 149 (uprefix + r'.*\|\| echo.*(fail|error)',
150 150 "explicit exit code checks unnecessary"),
151 151 (uprefix + r'set -e', "don't use set -e"),
152 152 (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
153 153 (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
154 154 (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
155 155 winglobmsg),
156 156 (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
157 157 '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
158 158 (r'^ reverting .*/.*[^)]$', winglobmsg),
159 159 (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
160 160 (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
161 161 (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
162 162 (r'^ moving \S+/.*[^)]$', winglobmsg),
163 163 (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
164 164 (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
165 165 ],
166 166 # warnings
167 167 [
168 168 (r'^ [^*?/\n]* \(glob\)$',
169 169 "glob match with no glob character (?*/)"),
170 170 ]
171 171 ]
172 172
# Shell-script rules also apply to the command lines of unified tests:
# rebase each pattern so it anchors after the " $ "/" > " prompt.
for severity in (0, 1):
    for pat, msg in testpats[severity]:
        if pat.startswith(r'^'):
            pat = r"^ [$>] (%s)" % pat[1:]
        else:
            pat = r"^ [$>] .*(%s)" % pat
        utestpats[severity].append((pat, msg))
180 180
181 181 utestfilters = [
182 182 (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
183 183 (r"( *)(#([^\n]*\S)?)", repcomment),
184 184 ]
185 185
186 186 pypats = [
187 187 [
188 188 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
189 189 "tuple parameter unpacking not available in Python 3+"),
190 190 (r'lambda\s*\(.*,.*\)',
191 191 "tuple parameter unpacking not available in Python 3+"),
192 192 (r'import (.+,[^.]+\.[^.]+|[^.]+\.[^.]+,)',
193 193 '2to3 can\'t always rewrite "import qux, foo.bar", '
194 194 'use "import foo.bar" on its own line instead.'),
195 195 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
196 196 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
197 197 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
198 198 (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
199 199 (r'^\s*\t', "don't use tabs"),
200 200 (r'\S;\s*\n', "semicolon"),
201 (r'[^_]_\("[^"]+"[ \t\n]*%', "don't use % inside _()"),
202 (r"[^_]_\('[^']+'[ \t\n]*%", "don't use % inside _()"),
201 (r'[^_]_\((?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
202 (r"[^_]_\((?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
203 203 (r'(\w|\)),\w', "missing whitespace after ,"),
204 204 (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
205 205 (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
206 206 (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
207 207 r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
208 208 (r'(?<!def)(\s+|^|\()next\(.+\)',
209 209 'no next(foo) in Python 2.4 and 2.5, use foo.next() instead'),
210 210 (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
211 211 r'((?:\n|\1\s.*\n)+?)\1finally:',
212 212 'no yield inside try/finally in Python 2.4'),
213 213 (r'.{81}', "line too long"),
214 214 (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'),
215 215 (r'[^\n]\Z', "no trailing newline"),
216 216 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
217 217 # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
218 218 # "don't use underbars in identifiers"),
219 219 (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
220 220 "don't use camelcase in identifiers"),
221 221 (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
222 222 "linebreak after :"),
223 223 (r'class\s[^( \n]+:', "old-style class, use class foo(object)"),
224 224 (r'class\s[^( \n]+\(\):',
225 225 "class foo() not available in Python 2.4, use class foo(object)"),
226 226 (r'\b(%s)\(' % '|'.join(keyword.kwlist),
227 227 "Python keyword is not a function"),
228 228 (r',]', "unneeded trailing ',' in list"),
229 229 # (r'class\s[A-Z][^\(]*\((?!Exception)',
230 230 # "don't capitalize non-exception classes"),
231 231 # (r'in range\(', "use xrange"),
232 232 # (r'^\s*print\s+', "avoid using print in core and extensions"),
233 233 (r'[\x80-\xff]', "non-ASCII character literal"),
234 234 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
235 235 (r'^\s*with\s+', "with not available in Python 2.4"),
236 236 (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
237 237 (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
238 238 (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
239 239 (r'(?<!def)\s+(any|all|format)\(',
240 240 "any/all/format not available in Python 2.4", 'no-py24'),
241 241 (r'(?<!def)\s+(callable)\(',
242 242 "callable not available in Python 3, use getattr(f, '__call__', None)"),
243 243 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
244 244 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
245 245 "gratuitous whitespace after Python keyword"),
246 246 (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
247 247 # (r'\s\s=', "gratuitous whitespace before ="),
248 248 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
249 249 "missing whitespace around operator"),
250 250 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
251 251 "missing whitespace around operator"),
252 252 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
253 253 "missing whitespace around operator"),
254 254 (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
255 255 "wrong whitespace around ="),
256 256 (r'\([^()]*( =[^=]|[^<>!=]= )',
257 257 "no whitespace around = for named parameters"),
258 258 (r'raise Exception', "don't raise generic exceptions"),
259 259 (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
260 260 "don't use old-style two-argument raise, use Exception(message)"),
261 261 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
262 262 (r' [=!]=\s+(True|False|None)',
263 263 "comparison with singleton, use 'is' or 'is not' instead"),
264 264 (r'^\s*(while|if) [01]:',
265 265 "use True/False for constant Boolean expression"),
266 266 (r'(?:(?<!def)\s+|\()hasattr',
267 267 'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
268 268 (r'opener\([^)]*\).read\(',
269 269 "use opener.read() instead"),
270 270 (r'BaseException', 'not in Python 2.4, use Exception'),
271 271 (r'os\.path\.relpath', 'os.path.relpath is not in Python 2.5'),
272 272 (r'opener\([^)]*\).write\(',
273 273 "use opener.write() instead"),
274 274 (r'[\s\(](open|file)\([^)]*\)\.read\(',
275 275 "use util.readfile() instead"),
276 276 (r'[\s\(](open|file)\([^)]*\)\.write\(',
277 277 "use util.writefile() instead"),
278 278 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
279 279 "always assign an opened file to a variable, and close it afterwards"),
280 280 (r'[\s\(](open|file)\([^)]*\)\.',
281 281 "always assign an opened file to a variable, and close it afterwards"),
282 282 (r'(?i)descendent', "the proper spelling is descendAnt"),
283 283 (r'\.debug\(\_', "don't mark debug messages for translation"),
284 284 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
285 285 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
286 286 (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
287 287 (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
288 288 "missing _() in ui message (use () to hide false-positives)"),
289 289 (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
290 290 ],
291 291 # warnings
292 292 [
293 293 (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
294 294 ]
295 295 ]
296 296
297 297 pyfilters = [
298 298 (r"""(?msx)(?P<comment>\#.*?$)|
299 299 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
300 300 (?P<text>(([^\\]|\\.)*?))
301 301 (?P=quote))""", reppython),
302 302 ]
303 303
304 304 txtfilters = []
305 305
306 306 txtpats = [
307 307 [
308 308 ('\s$', 'trailing whitespace'),
309 309 ],
310 310 []
311 311 ]
312 312
313 313 cpats = [
314 314 [
315 315 (r'//', "don't use //-style comments"),
316 316 (r'^ ', "don't use spaces to indent"),
317 317 (r'\S\t', "don't use tabs except for indent"),
318 318 (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
319 319 (r'.{81}', "line too long"),
320 320 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
321 321 (r'return\(', "return is not a function"),
322 322 (r' ;', "no space before ;"),
323 323 (r'[)][{]', "space between ) and {"),
324 324 (r'\w+\* \w+', "use int *foo, not int* foo"),
325 325 (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
326 326 (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
327 327 (r'\w,\w', "missing whitespace after ,"),
328 328 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
329 329 (r'^#\s+\w', "use #foo, not # foo"),
330 330 (r'[^\n]\Z', "no trailing newline"),
331 331 (r'^\s*#import\b', "use only #include in standard C code"),
332 332 ],
333 333 # warnings
334 334 []
335 335 ]
336 336
337 337 cfilters = [
338 338 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
339 339 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
340 340 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
341 341 (r'(\()([^)]+\))', repcallspaces),
342 342 ]
343 343
344 344 inutilpats = [
345 345 [
346 346 (r'\bui\.', "don't use ui in util"),
347 347 ],
348 348 # warnings
349 349 []
350 350 ]
351 351
352 352 inrevlogpats = [
353 353 [
354 354 (r'\brepo\.', "don't use repo in revlog"),
355 355 ],
356 356 # warnings
357 357 []
358 358 ]
359 359
360 360 checks = [
361 361 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
362 362 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
363 363 ('c', r'.*\.[ch]$', cfilters, cpats),
364 364 ('unified test', r'.*\.t$', utestfilters, utestpats),
365 365 ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
366 366 inrevlogpats),
367 367 ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
368 368 inutilpats),
369 369 ('txt', r'.*\.txt$', txtfilters, txtpats),
370 370 ]
371 371
def _preparepats():
    """Compile every pattern and filter in 'checks' in place.

    Raw patterns are first adjusted for multi-line matching: \\s would
    not cross a newline and a [^...] class would, so both are rewritten
    before compiling with re.MULTILINE.
    """
    for check in checks:
        # last element is the [errors, warnings] pattern pair
        for patlist in check[-1]:
            for idx, entry in enumerate(patlist):
                raw = entry[0]
                # \s doesn't match \n; restrict it to spaces and tabs
                raw = re.sub(r'(?<!\\)\\s', r'[ \\t]', raw)
                # [^...] doesn't match newline; exclude it explicitly
                raw = re.sub(r'(?<!\\)\[\^', r'[^\\n', raw)
                patlist[idx] = (re.compile(raw, re.MULTILINE),) + entry[1:]
        filterlist = check[2]
        for idx, pair in enumerate(filterlist):
            filterlist[idx] = re.compile(pair[0]), pair[1]
_preparepats()
389 389
390 390 class norepeatlogger(object):
391 391 def __init__(self):
392 392 self._lastseen = None
393 393
394 394 def log(self, fname, lineno, line, msg, blame):
395 395 """print error related a to given line of a given file.
396 396
397 397 The faulty line will also be printed but only once in the case
398 398 of multiple errors.
399 399
400 400 :fname: filename
401 401 :lineno: line number
402 402 :line: actual content of the line
403 403 :msg: error message
404 404 """
405 405 msgid = fname, lineno, line
406 406 if msgid != self._lastseen:
407 407 if blame:
408 408 print "%s:%d (%s):" % (fname, lineno, blame)
409 409 else:
410 410 print "%s:%d:" % (fname, lineno)
411 411 print " > %s" % line
412 412 self._lastseen = msgid
413 413 print " " + msg
414 414
415 415 _defaultlogger = norepeatlogger()
416 416
def getblame(f):
    """Return [(line, user, rev), ...] from 'hg annotate' for file f."""
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        header, content = raw.split(':', 1)
        user, rev = header.split()
        # drop the separating space and the trailing newline
        annotated.append((content[1:-1], user, rev))
    return annotated
424 424
def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False, lineno=True):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
              logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of error to display before aborting.
             Set to false (default) to report all errors

    return True if no error is found, False otherwise.
    """
    blamecache = None  # lazily filled by getblame() on first blamed error
    result = True
    for name, match, filters, pats in checks:
        if debug:
            print name, f
        fc = 0  # errors reported so far for this check
        if not re.match(match, f):
            # filename does not belong to this check category
            if debug:
                print "Skipping %s for %s it doesn't match %s" % (
                       name, match, f)
            continue
        try:
            fp = open(f)
        except IOError, e:
            print "Skipping %s, %s" % (f, str(e).split(':', 1)[0])
            continue
        # 'pre' keeps the original text for reporting; 'post' gets the
        # filtered copy that the patterns actually run against
        pre = post = fp.read()
        fp.close()
        if "no-" "check-code" in pre:
            print "Skipping %s it has no-" "check-code" % f
            return "Skip" # skip checking this file
        for p, r in filters:
            post = re.sub(p, r, post)
        nerrs = len(pats[0]) # nerr elements are errors
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version

        if debug:
            print "Checking %s for %s" % (name, f)

        prelines = None
        errors = []
        for i, pat in enumerate(pats):
            if len(pat) == 3:
                p, msg, ignore = pat
            else:
                p, msg = pat
                ignore = None
            if i >= nerrs:
                # entries past the error section are warnings
                msg = "warning: " + msg

            # (pos, n) walk the filtered text line by line; matches are
            # mapped back to original lines via the shared line count
            pos = 0
            n = 0
            for m in p.finditer(post):
                if prelines is None:
                    # split lazily, only when the first match occurs
                    prelines = pre.splitlines()
                    postlines = post.splitlines(True)

                start = m.start()
                while n < len(postlines):
                    step = len(postlines[n])
                    if pos + step > start:
                        break
                    pos += step
                    n += 1
                # report the original, unfiltered line content
                l = prelines[n]

                if ignore and re.search(ignore, l, re.MULTILINE):
                    if debug:
                        print "Skipping %s for %s:%s (ignore pattern)" % (
                              name, f, n)
                    continue
                bd = ""
                if blame:
                    bd = 'working directory'
                    if not blamecache:
                        blamecache = getblame(f)
                    if n < len(blamecache):
                        bl, bu, br = blamecache[n]
                        # only attribute the line if annotate agrees on
                        # its content (file may have local modifications)
                        if bl == l:
                            bd = '%s@%s' % (bu, br)

                errors.append((f, lineno and n + 1, l, msg, bd))
                result = False

        errors.sort()
        for e in errors:
            logfunc(*e)
            fc += 1
            if maxerr and fc >= maxerr:
                print " (too many errors, giving up)"
                break

    return result
524 524
if __name__ == "__main__":
    # command-line entry point: run every check on the given files
    # (or everything in the current directory) and exit non-zero on
    # any reported problem
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")
    parser.add_option("", "--nolineno", action="store_false",
                      dest='lineno', help="don't show line numbers")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
                        lineno=True)
    (options, args) = parser.parse_args()

    targets = args or glob.glob("*")

    ret = 0
    for f in targets:
        ok = checkfile(f, maxerr=options.per_file,
                       warnings=options.warnings, blame=options.blame,
                       debug=options.debug, lineno=options.lineno)
        if not ok:
            ret = 1
    sys.exit(ret)
@@ -1,350 +1,350 b''
1 1 """automatically manage newlines in repository files
2 2
3 3 This extension allows you to manage the type of line endings (CRLF or
4 4 LF) that are used in the repository and in the local working
5 5 directory. That way you can get CRLF line endings on Windows and LF on
6 6 Unix/Mac, thereby letting everybody use their OS native line endings.
7 7
8 8 The extension reads its configuration from a versioned ``.hgeol``
9 9 configuration file found in the root of the working copy. The
10 10 ``.hgeol`` file use the same syntax as all other Mercurial
11 11 configuration files. It uses two sections, ``[patterns]`` and
12 12 ``[repository]``.
13 13
14 14 The ``[patterns]`` section specifies how line endings should be
15 15 converted between the working copy and the repository. The format is
16 16 specified by a file pattern. The first match is used, so put more
17 17 specific patterns first. The available line endings are ``LF``,
18 18 ``CRLF``, and ``BIN``.
19 19
20 20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 21 checked out and stored in the repository in that format and files
22 22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 23 ``native`` is an alias for checking out in the platform's default line
24 24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 26 default behaviour; it is only needed if you need to override a later,
27 27 more general pattern.
28 28
29 29 The optional ``[repository]`` section specifies the line endings to
30 30 use for files stored in the repository. It has a single setting,
31 31 ``native``, which determines the storage line endings for files
32 32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 35 will be converted to ``LF`` when stored in the repository. Files
36 36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 37 are always stored as-is in the repository.
38 38
39 39 Example versioned ``.hgeol`` file::
40 40
41 41 [patterns]
42 42 **.py = native
43 43 **.vcproj = CRLF
44 44 **.txt = native
45 45 Makefile = LF
46 46 **.jpg = BIN
47 47
48 48 [repository]
49 49 native = LF
50 50
51 51 .. note::
52 52
53 53 The rules will first apply when files are touched in the working
54 54 copy, e.g. by updating to null and back to tip to touch all files.
55 55
56 56 The extension uses an optional ``[eol]`` section read from both the
57 57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 58 latter overriding the former. You can use that section to control the
59 59 overall behavior. There are three settings:
60 60
61 61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 62 ``CRLF`` to override the default interpretation of ``native`` for
63 63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 64 generate an archive where files have line endings for Windows.
65 65
66 66 - ``eol.only-consistent`` (default True) can be set to False to make
67 67 the extension convert files with inconsistent EOLs. Inconsistent
68 68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 69 Such files are normally not touched under the assumption that they
70 70 have mixed EOLs on purpose.
71 71
72 72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 73 ensure that converted files end with a EOL character (either ``\\n``
74 74 or ``\\r\\n`` as per the configured patterns).
75 75
76 76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 77 like the deprecated win32text extension does. This means that you can
78 78 disable win32text and enable eol and your filters will still work. You
79 79 only need to these filters until you have prepared a ``.hgeol`` file.
80 80
81 81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 82 have been unified into a single hook named ``eol.checkheadshook``. The
83 83 hook will lookup the expected line endings from the ``.hgeol`` file,
84 84 which means you must migrate to a ``.hgeol`` file first before using
85 85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
86 86 invalid revisions will be pushed. To forbid them completely, use the
87 87 ``eol.checkallhook`` hook. These hooks are best used as
88 88 ``pretxnchangegroup`` hooks.
89 89
90 90 See :hg:`help patterns` for more information about the glob patterns
91 91 used.
92 92 """
93 93
94 94 from mercurial.i18n import _
95 95 from mercurial import util, config, extensions, match, error
96 96 import re, os
97 97
98 98 testedwith = 'internal'
99 99
# Matches a lone LF, i.e., one that is not part of CRLF.
singlelf = re.compile('(^|[^\r])\n')
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
eolre = re.compile('\r*\n')


def inconsistenteol(data):
    """Report whether data mixes CRLF and lone-LF line endings."""
    if '\r\n' not in data:
        return False
    return singlelf.search(data)
110 110
def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    if util.binary(s):
        # never rewrite binary payloads
        return s
    if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
        # mixed EOLs are assumed intentional unless configured otherwise
        return s
    fixtrailing = ui.configbool('eol', 'fix-trailing-newline', False)
    if fixtrailing and s and s[-1] != '\n':
        s = s + '\n'
    return eolre.sub('\n', s)
121 121
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if util.binary(s):
        # never rewrite binary payloads
        return s
    if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
        # mixed EOLs are assumed intentional unless configured otherwise
        return s
    fixtrailing = ui.configbool('eol', 'fix-trailing-newline', False)
    if fixtrailing and s and s[-1] != '\n':
        s = s + '\n'
    return eolre.sub('\r\n', s)
132 132
def isbinary(s, params):
    """Filter to do nothing with the file."""
    # identity filter installed for patterns declared BIN
    return s
136 136
# Map filter names (as installed into [decode]/[encode]) to callables.
filters = {
    'to-lf': tolf,
    'to-crlf': tocrlf,
    'is-binary': isbinary,
    # The following provide backwards compatibility with win32text
    'cleverencode:': tolf,
    'cleverdecode:': tocrlf
}
145 145
class eolfile(object):
    """Parsed .hgeol content: maps file patterns to EOL filters."""

    def __init__(self, ui, root, data):
        """Parse data as the .hgeol file of the working copy at root."""
        # style name -> filter for checkout (decode) and commit (encode)
        self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
        self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}

        self.cfg = config.config()
        # Our files should not be touched. The pattern must be
        # inserted first override a '** = native' pattern.
        self.cfg.set('patterns', '.hg*', 'BIN')
        # We can then parse the user's patterns.
        self.cfg.parse('.hgeol', data)

        # 'native' resolves differently for storage vs. working dir
        isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
        self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
        iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
        self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'

        include = []
        exclude = []
        for pattern, style in self.cfg.items('patterns'):
            key = style.upper()
            if key == 'BIN':
                exclude.append(pattern)
            else:
                include.append(pattern)
        # This will match the files for which we need to care
        # about inconsistent newlines.
        self.match = match.match(root, '', [], include, exclude)

    def copytoui(self, ui):
        """Install the decode/encode filters and [eol] settings into ui."""
        for pattern, style in self.cfg.items('patterns'):
            key = style.upper()
            try:
                ui.setconfig('decode', pattern, self._decode[key])
                ui.setconfig('encode', pattern, self._encode[key])
            except KeyError:
                # unknown style name in .hgeol: warn but keep going
                ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
                        % (style, self.cfg.source('patterns', pattern)))
        # eol.only-consistent can be specified in ~/.hgrc or .hgeol
        for k, v in self.cfg.items('eol'):
            ui.setconfig('eol', k, v)

    def checkrev(self, repo, ctx, files):
        """Return (changeset, filter, file) triples violating their style."""
        failed = []
        for f in (files or ctx.files()):
            if f not in ctx:
                continue
            for pattern, style in self.cfg.items('patterns'):
                if not match.match(repo.root, '', [pattern])(f):
                    continue
                target = self._encode[style.upper()]
                data = ctx[f].data()
                # a to-lf file must contain no CRLF; a to-crlf file
                # must contain no lone LF
                if (target == "to-lf" and "\r\n" in data
                    or target == "to-crlf" and singlelf.search(data)):
                    failed.append((str(ctx), target, f))
                # first matching pattern wins
                break
        return failed
203 203
def parseeol(ui, repo, nodes):
    """Build an eolfile from the first node providing a .hgeol file.

    Returns None when no candidate node has a usable .hgeol (missing
    file, or a parse error, which is reported as a warning).
    """
    try:
        for node in nodes:
            try:
                if node is None:
                    # Cannot use workingctx.data() since it would load
                    # and cache the filters before we configure them.
                    data = repo.wfile('.hgeol').read()
                else:
                    data = repo[node]['.hgeol'].data()
                return eolfile(ui, repo.root, data)
            except (IOError, LookupError):
                # no .hgeol in this node: try the next candidate
                pass
    except error.ParseError, inst:
        ui.warn(_("warning: ignoring .hgeol file due to parse error "
                  "at %s: %s\n") % (inst.args[1], inst.args[0]))
    return None
221 221
def _checkhook(ui, repo, node, headsonly):
    """Shared implementation of the EOL-verifying changegroup hooks.

    Checks every revision from 'node' to tip; with headsonly=True only
    head revisions are checked, against all files touched in between.
    Raises util.Abort listing each offending file.
    """
    # Get revisions to check and touched files at the same time
    files = set()
    revs = set()
    for rev in xrange(repo[node].rev(), len(repo)):
        revs.add(rev)
        if headsonly:
            ctx = repo[rev]
            files.update(ctx.files())
            for pctx in ctx.parents():
                # any revision that is a parent of another incoming
                # revision is not a head: drop it
                revs.discard(pctx.rev())
    failed = []
    for rev in revs:
        ctx = repo[rev]
        eol = parseeol(ui, repo, [ctx.node()])
        if eol:
            failed.extend(eol.checkrev(repo, ctx, files))

    if failed:
        # map the conversion target back to the offending EOL style
        eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
        msgs = []
        for node, target, f in failed:
            msgs.append(_(" %s in %s should not have %s line endings") %
                        (f, node, eols[target]))
        raise util.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
247 247
def checkallhook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    # strict variant: every incoming revision is checked
    _checkhook(ui, repo, node, False)
251 251
def checkheadshook(ui, repo, node, hooktype, **kwargs):
    """verify that files have expected EOLs"""
    # lenient variant: only head revisions are checked
    _checkhook(ui, repo, node, True)

# "checkheadshook" used to be called "hook"
hook = checkheadshook
258 258
def preupdate(ui, repo, hooktype, parent1, parent2):
    # configure EOL filters for the target revision before the update
    # writes files out; returning False lets the update proceed
    repo.loadeol([parent1])
    return False
262 262
def uisetup(ui):
    # reload .hgeol-derived filters on every working-directory update
    ui.setconfig('hooks', 'preupdate.eol', preupdate)
265 265
def extsetup(ui):
    # warn when win32text is also enabled: both extensions rewrite
    # EOLs and would fight over the same files
    try:
        extensions.find('win32text')
        ui.warn(_("the eol extension is incompatible with the "
                  "win32text extension\n"))
    except KeyError:
        # win32text not loaded: nothing to do
        pass
273 273
274 274
def reposetup(ui, repo):
    """Wrap repo so the working directory follows .hgeol (local repos only)."""
    uisetup(repo.ui)

    if not repo.local():
        return
    for name, fn in filters.iteritems():
        repo.adddatafilter(name, fn)

    ui.setconfig('patch', 'eol', 'auto')

    class eolrepo(repo.__class__):

        def loadeol(self, nodes):
            # install filters from the first node with a .hgeol; return
            # the matcher for files needing consistency checks, or None
            eol = parseeol(self.ui, self, nodes)
            if eol is None:
                return None
            eol.copytoui(self.ui)
            return eol.match

        def _hgcleardirstate(self):
            self._eolfile = self.loadeol([None, 'tip'])
            if not self._eolfile:
                # no .hgeol anywhere: nothing ever matches
                self._eolfile = util.never
                return

            # compare mtimes to detect a .hgeol newer than our cache
            try:
                cachemtime = os.path.getmtime(self.join("eol.cache"))
            except OSError:
                cachemtime = 0

            try:
                eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
            except OSError:
                eolmtime = 0

            if eolmtime > cachemtime:
                self.ui.debug("eol: detected change in .hgeol\n")
                wlock = None
                try:
                    wlock = self.wlock()
                    for f in self.dirstate:
                        if self.dirstate[f] == 'n':
                            # all normal files need to be looked at
                            # again since the new .hgeol file might no
                            # longer match a file it matched before
                            self.dirstate.normallookup(f)
                    # Create or touch the cache to update mtime
                    self.opener("eol.cache", "w").close()
                    wlock.release()
                except error.LockUnavailable:
                    # If we cannot lock the repository and clear the
                    # dirstate, then a commit might not see all files
                    # as modified. But if we cannot lock the
                    # repository, then we can also not make a commit,
                    # so ignore the error.
                    pass

        def commitctx(self, ctx, error=False):
            # refuse to commit files that mix EOL styles
            for f in sorted(ctx.added() + ctx.modified()):
                if not self._eolfile(f):
                    continue
                try:
                    data = ctx[f].data()
                except IOError:
                    continue
                if util.binary(data):
                    # We should not abort here, since the user should
                    # be able to say "** = native" to automatically
                    # have all non-binary files taken care of.
                    continue
                if inconsistenteol(data):
                    # apply % after _() so the message stays translatable
                    raise util.Abort(_("inconsistent newline style "
                                       "in %s\n") % f)
            return super(eolrepo, self).commitctx(ctx, error)
    repo.__class__ = eolrepo
    repo._hgcleardirstate()
@@ -1,1931 +1,1931 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email, os, errno, re, posixpath
10 10 import tempfile, zlib, shutil
11 11 # On python2.4 you have to import these by name or they fail to
12 12 # load. This was not a problem on Python 2.7.
13 13 import email.Generator
14 14 import email.Parser
15 15
16 16 from i18n import _
17 17 from node import hex, short
18 18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 19
20 20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 21
22 22 class PatchError(Exception):
23 23 pass
24 24
25 25
26 26 # public functions
27 27
28 28 def split(stream):
29 29 '''return an iterator of individual patches from a stream'''
30 30 def isheader(line, inheader):
31 31 if inheader and line[0] in (' ', '\t'):
32 32 # continuation
33 33 return True
34 34 if line[0] in (' ', '-', '+'):
35 35 # diff line - don't check for header pattern in there
36 36 return False
37 37 l = line.split(': ', 1)
38 38 return len(l) == 2 and ' ' not in l[0]
39 39
40 40 def chunk(lines):
41 41 return cStringIO.StringIO(''.join(lines))
42 42
43 43 def hgsplit(stream, cur):
44 44 inheader = True
45 45
46 46 for line in stream:
47 47 if not line.strip():
48 48 inheader = False
49 49 if not inheader and line.startswith('# HG changeset patch'):
50 50 yield chunk(cur)
51 51 cur = []
52 52 inheader = True
53 53
54 54 cur.append(line)
55 55
56 56 if cur:
57 57 yield chunk(cur)
58 58
59 59 def mboxsplit(stream, cur):
60 60 for line in stream:
61 61 if line.startswith('From '):
62 62 for c in split(chunk(cur[1:])):
63 63 yield c
64 64 cur = []
65 65
66 66 cur.append(line)
67 67
68 68 if cur:
69 69 for c in split(chunk(cur[1:])):
70 70 yield c
71 71
72 72 def mimesplit(stream, cur):
73 73 def msgfp(m):
74 74 fp = cStringIO.StringIO()
75 75 g = email.Generator.Generator(fp, mangle_from_=False)
76 76 g.flatten(m)
77 77 fp.seek(0)
78 78 return fp
79 79
80 80 for line in stream:
81 81 cur.append(line)
82 82 c = chunk(cur)
83 83
84 84 m = email.Parser.Parser().parse(c)
85 85 if not m.is_multipart():
86 86 yield msgfp(m)
87 87 else:
88 88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 89 for part in m.walk():
90 90 ct = part.get_content_type()
91 91 if ct not in ok_types:
92 92 continue
93 93 yield msgfp(part)
94 94
95 95 def headersplit(stream, cur):
96 96 inheader = False
97 97
98 98 for line in stream:
99 99 if not inheader and isheader(line, inheader):
100 100 yield chunk(cur)
101 101 cur = []
102 102 inheader = True
103 103 if inheader and not isheader(line, inheader):
104 104 inheader = False
105 105
106 106 cur.append(line)
107 107
108 108 if cur:
109 109 yield chunk(cur)
110 110
111 111 def remainder(cur):
112 112 yield chunk(cur)
113 113
114 114 class fiter(object):
115 115 def __init__(self, fp):
116 116 self.fp = fp
117 117
118 118 def __iter__(self):
119 119 return self
120 120
121 121 def next(self):
122 122 l = self.fp.readline()
123 123 if not l:
124 124 raise StopIteration
125 125 return l
126 126
127 127 inheader = False
128 128 cur = []
129 129
130 130 mimeheaders = ['content-type']
131 131
132 132 if not util.safehasattr(stream, 'next'):
133 133 # http responses, for example, have readline but not next
134 134 stream = fiter(stream)
135 135
136 136 for line in stream:
137 137 cur.append(line)
138 138 if line.startswith('# HG changeset patch'):
139 139 return hgsplit(stream, cur)
140 140 elif line.startswith('From '):
141 141 return mboxsplit(stream, cur)
142 142 elif isheader(line, inheader):
143 143 inheader = True
144 144 if line.split(':', 1)[0].lower() in mimeheaders:
145 145 # let email parser handle this
146 146 return mimesplit(stream, cur)
147 147 elif line.startswith('--- ') and inheader:
148 148 # No evil headers seen by diff start, split by hand
149 149 return headersplit(stream, cur)
150 150 # Not enough info, keep reading
151 151
152 152 # if we are here, we have a very plain patch
153 153 return remainder(cur)
154 154
155 155 def extract(ui, fileobj):
156 156 '''extract patch from data read from fileobj.
157 157
158 158 patch can be a normal patch or contained in an email message.
159 159
160 160 return tuple (filename, message, user, date, branch, node, p1, p2).
161 161 Any item in the returned tuple can be None. If filename is None,
162 162 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 163
164 164 # attempt to detect the start of a patch
165 165 # (this heuristic is borrowed from quilt)
166 166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 168 r'---[ \t].*?^\+\+\+[ \t]|'
169 169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 170
171 171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 172 tmpfp = os.fdopen(fd, 'w')
173 173 try:
174 174 msg = email.Parser.Parser().parse(fileobj)
175 175
176 176 subject = msg['Subject']
177 177 user = msg['From']
178 178 if not subject and not user:
179 179 # Not an email, restore parsed headers if any
180 180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 181
182 182 # should try to parse msg['Date']
183 183 date = None
184 184 nodeid = None
185 185 branch = None
186 186 parents = []
187 187
188 188 if subject:
189 189 if subject.startswith('[PATCH'):
190 190 pend = subject.find(']')
191 191 if pend >= 0:
192 192 subject = subject[pend + 1:].lstrip()
193 193 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 194 ui.debug('Subject: %s\n' % subject)
195 195 if user:
196 196 ui.debug('From: %s\n' % user)
197 197 diffs_seen = 0
198 198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 199 message = ''
200 200 for part in msg.walk():
201 201 content_type = part.get_content_type()
202 202 ui.debug('Content-Type: %s\n' % content_type)
203 203 if content_type not in ok_types:
204 204 continue
205 205 payload = part.get_payload(decode=True)
206 206 m = diffre.search(payload)
207 207 if m:
208 208 hgpatch = False
209 209 hgpatchheader = False
210 210 ignoretext = False
211 211
212 212 ui.debug('found patch at byte %d\n' % m.start(0))
213 213 diffs_seen += 1
214 214 cfp = cStringIO.StringIO()
215 215 for line in payload[:m.start(0)].splitlines():
216 216 if line.startswith('# HG changeset patch') and not hgpatch:
217 217 ui.debug('patch generated by hg export\n')
218 218 hgpatch = True
219 219 hgpatchheader = True
220 220 # drop earlier commit message content
221 221 cfp.seek(0)
222 222 cfp.truncate()
223 223 subject = None
224 224 elif hgpatchheader:
225 225 if line.startswith('# User '):
226 226 user = line[7:]
227 227 ui.debug('From: %s\n' % user)
228 228 elif line.startswith("# Date "):
229 229 date = line[7:]
230 230 elif line.startswith("# Branch "):
231 231 branch = line[9:]
232 232 elif line.startswith("# Node ID "):
233 233 nodeid = line[10:]
234 234 elif line.startswith("# Parent "):
235 235 parents.append(line[9:].lstrip())
236 236 elif not line.startswith("# "):
237 237 hgpatchheader = False
238 238 elif line == '---':
239 239 ignoretext = True
240 240 if not hgpatchheader and not ignoretext:
241 241 cfp.write(line)
242 242 cfp.write('\n')
243 243 message = cfp.getvalue()
244 244 if tmpfp:
245 245 tmpfp.write(payload)
246 246 if not payload.endswith('\n'):
247 247 tmpfp.write('\n')
248 248 elif not diffs_seen and message and content_type == 'text/plain':
249 249 message += '\n' + payload
250 250 except: # re-raises
251 251 tmpfp.close()
252 252 os.unlink(tmpname)
253 253 raise
254 254
255 255 if subject and not message.startswith(subject):
256 256 message = '%s\n%s' % (subject, message)
257 257 tmpfp.close()
258 258 if not diffs_seen:
259 259 os.unlink(tmpname)
260 260 return None, message, user, date, branch, None, None, None
261 261 p1 = parents and parents.pop(0) or None
262 262 p2 = parents and parents.pop(0) or None
263 263 return tmpname, message, user, date, branch, nodeid, p1, p2
264 264
265 265 class patchmeta(object):
266 266 """Patched file metadata
267 267
268 268 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 269 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 270 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 271 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 272 'islink' is True if the file is a symlink and 'isexec' is True if
273 273 the file is executable. Otherwise, 'mode' is None.
274 274 """
275 275 def __init__(self, path):
276 276 self.path = path
277 277 self.oldpath = None
278 278 self.mode = None
279 279 self.op = 'MODIFY'
280 280 self.binary = False
281 281
282 282 def setmode(self, mode):
283 283 islink = mode & 020000
284 284 isexec = mode & 0100
285 285 self.mode = (islink, isexec)
286 286
287 287 def copy(self):
288 288 other = patchmeta(self.path)
289 289 other.oldpath = self.oldpath
290 290 other.mode = self.mode
291 291 other.op = self.op
292 292 other.binary = self.binary
293 293 return other
294 294
295 295 def _ispatchinga(self, afile):
296 296 if afile == '/dev/null':
297 297 return self.op == 'ADD'
298 298 return afile == 'a/' + (self.oldpath or self.path)
299 299
300 300 def _ispatchingb(self, bfile):
301 301 if bfile == '/dev/null':
302 302 return self.op == 'DELETE'
303 303 return bfile == 'b/' + self.path
304 304
305 305 def ispatching(self, afile, bfile):
306 306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 307
308 308 def __repr__(self):
309 309 return "<patchmeta %s %r>" % (self.op, self.path)
310 310
311 311 def readgitpatch(lr):
312 312 """extract git-style metadata about patches from <patchname>"""
313 313
314 314 # Filter patch for git information
315 315 gp = None
316 316 gitpatches = []
317 317 for line in lr:
318 318 line = line.rstrip(' \r\n')
319 319 if line.startswith('diff --git a/'):
320 320 m = gitre.match(line)
321 321 if m:
322 322 if gp:
323 323 gitpatches.append(gp)
324 324 dst = m.group(2)
325 325 gp = patchmeta(dst)
326 326 elif gp:
327 327 if line.startswith('--- '):
328 328 gitpatches.append(gp)
329 329 gp = None
330 330 continue
331 331 if line.startswith('rename from '):
332 332 gp.op = 'RENAME'
333 333 gp.oldpath = line[12:]
334 334 elif line.startswith('rename to '):
335 335 gp.path = line[10:]
336 336 elif line.startswith('copy from '):
337 337 gp.op = 'COPY'
338 338 gp.oldpath = line[10:]
339 339 elif line.startswith('copy to '):
340 340 gp.path = line[8:]
341 341 elif line.startswith('deleted file'):
342 342 gp.op = 'DELETE'
343 343 elif line.startswith('new file mode '):
344 344 gp.op = 'ADD'
345 345 gp.setmode(int(line[-6:], 8))
346 346 elif line.startswith('new mode '):
347 347 gp.setmode(int(line[-6:], 8))
348 348 elif line.startswith('GIT binary patch'):
349 349 gp.binary = True
350 350 if gp:
351 351 gitpatches.append(gp)
352 352
353 353 return gitpatches
354 354
355 355 class linereader(object):
356 356 # simple class to allow pushing lines back into the input stream
357 357 def __init__(self, fp):
358 358 self.fp = fp
359 359 self.buf = []
360 360
361 361 def push(self, line):
362 362 if line is not None:
363 363 self.buf.append(line)
364 364
365 365 def readline(self):
366 366 if self.buf:
367 367 l = self.buf[0]
368 368 del self.buf[0]
369 369 return l
370 370 return self.fp.readline()
371 371
372 372 def __iter__(self):
373 373 while True:
374 374 l = self.readline()
375 375 if not l:
376 376 break
377 377 yield l
378 378
379 379 class abstractbackend(object):
380 380 def __init__(self, ui):
381 381 self.ui = ui
382 382
383 383 def getfile(self, fname):
384 384 """Return target file data and flags as a (data, (islink,
385 385 isexec)) tuple.
386 386 """
387 387 raise NotImplementedError
388 388
389 389 def setfile(self, fname, data, mode, copysource):
390 390 """Write data to target file fname and set its mode. mode is a
391 391 (islink, isexec) tuple. If data is None, the file content should
392 392 be left unchanged. If the file is modified after being copied,
393 393 copysource is set to the original file name.
394 394 """
395 395 raise NotImplementedError
396 396
397 397 def unlink(self, fname):
398 398 """Unlink target file."""
399 399 raise NotImplementedError
400 400
401 401 def writerej(self, fname, failed, total, lines):
402 402 """Write rejected lines for fname. total is the number of hunks
403 403 which failed to apply and total the total number of hunks for this
404 404 files.
405 405 """
406 406 pass
407 407
408 408 def exists(self, fname):
409 409 raise NotImplementedError
410 410
411 411 class fsbackend(abstractbackend):
412 412 def __init__(self, ui, basedir):
413 413 super(fsbackend, self).__init__(ui)
414 414 self.opener = scmutil.opener(basedir)
415 415
416 416 def _join(self, f):
417 417 return os.path.join(self.opener.base, f)
418 418
419 419 def getfile(self, fname):
420 420 path = self._join(fname)
421 421 if os.path.islink(path):
422 422 return (os.readlink(path), (True, False))
423 423 isexec = False
424 424 try:
425 425 isexec = os.lstat(path).st_mode & 0100 != 0
426 426 except OSError, e:
427 427 if e.errno != errno.ENOENT:
428 428 raise
429 429 return (self.opener.read(fname), (False, isexec))
430 430
431 431 def setfile(self, fname, data, mode, copysource):
432 432 islink, isexec = mode
433 433 if data is None:
434 434 util.setflags(self._join(fname), islink, isexec)
435 435 return
436 436 if islink:
437 437 self.opener.symlink(data, fname)
438 438 else:
439 439 self.opener.write(fname, data)
440 440 if isexec:
441 441 util.setflags(self._join(fname), False, True)
442 442
443 443 def unlink(self, fname):
444 444 util.unlinkpath(self._join(fname), ignoremissing=True)
445 445
446 446 def writerej(self, fname, failed, total, lines):
447 447 fname = fname + ".rej"
448 448 self.ui.warn(
449 449 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
450 450 (failed, total, fname))
451 451 fp = self.opener(fname, 'w')
452 452 fp.writelines(lines)
453 453 fp.close()
454 454
455 455 def exists(self, fname):
456 456 return os.path.lexists(self._join(fname))
457 457
458 458 class workingbackend(fsbackend):
459 459 def __init__(self, ui, repo, similarity):
460 460 super(workingbackend, self).__init__(ui, repo.root)
461 461 self.repo = repo
462 462 self.similarity = similarity
463 463 self.removed = set()
464 464 self.changed = set()
465 465 self.copied = []
466 466
467 467 def _checkknown(self, fname):
468 468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
469 469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
470 470
471 471 def setfile(self, fname, data, mode, copysource):
472 472 self._checkknown(fname)
473 473 super(workingbackend, self).setfile(fname, data, mode, copysource)
474 474 if copysource is not None:
475 475 self.copied.append((copysource, fname))
476 476 self.changed.add(fname)
477 477
478 478 def unlink(self, fname):
479 479 self._checkknown(fname)
480 480 super(workingbackend, self).unlink(fname)
481 481 self.removed.add(fname)
482 482 self.changed.add(fname)
483 483
484 484 def close(self):
485 485 wctx = self.repo[None]
486 486 changed = set(self.changed)
487 487 for src, dst in self.copied:
488 488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
489 489 if self.removed:
490 490 wctx.forget(sorted(self.removed))
491 491 for f in self.removed:
492 492 if f not in self.repo.dirstate:
493 493 # File was deleted and no longer belongs to the
494 494 # dirstate, it was probably marked added then
495 495 # deleted, and should not be considered by
496 496 # marktouched().
497 497 changed.discard(f)
498 498 if changed:
499 499 scmutil.marktouched(self.repo, changed, self.similarity)
500 500 return sorted(self.changed)
501 501
502 502 class filestore(object):
503 503 def __init__(self, maxsize=None):
504 504 self.opener = None
505 505 self.files = {}
506 506 self.created = 0
507 507 self.maxsize = maxsize
508 508 if self.maxsize is None:
509 509 self.maxsize = 4*(2**20)
510 510 self.size = 0
511 511 self.data = {}
512 512
513 513 def setfile(self, fname, data, mode, copied=None):
514 514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
515 515 self.data[fname] = (data, mode, copied)
516 516 self.size += len(data)
517 517 else:
518 518 if self.opener is None:
519 519 root = tempfile.mkdtemp(prefix='hg-patch-')
520 520 self.opener = scmutil.opener(root)
521 521 # Avoid filename issues with these simple names
522 522 fn = str(self.created)
523 523 self.opener.write(fn, data)
524 524 self.created += 1
525 525 self.files[fname] = (fn, mode, copied)
526 526
527 527 def getfile(self, fname):
528 528 if fname in self.data:
529 529 return self.data[fname]
530 530 if not self.opener or fname not in self.files:
531 531 raise IOError
532 532 fn, mode, copied = self.files[fname]
533 533 return self.opener.read(fn), mode, copied
534 534
535 535 def close(self):
536 536 if self.opener:
537 537 shutil.rmtree(self.opener.base)
538 538
539 539 class repobackend(abstractbackend):
540 540 def __init__(self, ui, repo, ctx, store):
541 541 super(repobackend, self).__init__(ui)
542 542 self.repo = repo
543 543 self.ctx = ctx
544 544 self.store = store
545 545 self.changed = set()
546 546 self.removed = set()
547 547 self.copied = {}
548 548
549 549 def _checkknown(self, fname):
550 550 if fname not in self.ctx:
551 551 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
552 552
553 553 def getfile(self, fname):
554 554 try:
555 555 fctx = self.ctx[fname]
556 556 except error.LookupError:
557 557 raise IOError
558 558 flags = fctx.flags()
559 559 return fctx.data(), ('l' in flags, 'x' in flags)
560 560
561 561 def setfile(self, fname, data, mode, copysource):
562 562 if copysource:
563 563 self._checkknown(copysource)
564 564 if data is None:
565 565 data = self.ctx[fname].data()
566 566 self.store.setfile(fname, data, mode, copysource)
567 567 self.changed.add(fname)
568 568 if copysource:
569 569 self.copied[fname] = copysource
570 570
571 571 def unlink(self, fname):
572 572 self._checkknown(fname)
573 573 self.removed.add(fname)
574 574
575 575 def exists(self, fname):
576 576 return fname in self.ctx
577 577
578 578 def close(self):
579 579 return self.changed | self.removed
580 580
581 581 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
582 582 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
583 583 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
584 584 eolmodes = ['strict', 'crlf', 'lf', 'auto']
585 585
586 586 class patchfile(object):
587 587 def __init__(self, ui, gp, backend, store, eolmode='strict'):
588 588 self.fname = gp.path
589 589 self.eolmode = eolmode
590 590 self.eol = None
591 591 self.backend = backend
592 592 self.ui = ui
593 593 self.lines = []
594 594 self.exists = False
595 595 self.missing = True
596 596 self.mode = gp.mode
597 597 self.copysource = gp.oldpath
598 598 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
599 599 self.remove = gp.op == 'DELETE'
600 600 try:
601 601 if self.copysource is None:
602 602 data, mode = backend.getfile(self.fname)
603 603 self.exists = True
604 604 else:
605 605 data, mode = store.getfile(self.copysource)[:2]
606 606 self.exists = backend.exists(self.fname)
607 607 self.missing = False
608 608 if data:
609 609 self.lines = mdiff.splitnewlines(data)
610 610 if self.mode is None:
611 611 self.mode = mode
612 612 if self.lines:
613 613 # Normalize line endings
614 614 if self.lines[0].endswith('\r\n'):
615 615 self.eol = '\r\n'
616 616 elif self.lines[0].endswith('\n'):
617 617 self.eol = '\n'
618 618 if eolmode != 'strict':
619 619 nlines = []
620 620 for l in self.lines:
621 621 if l.endswith('\r\n'):
622 622 l = l[:-2] + '\n'
623 623 nlines.append(l)
624 624 self.lines = nlines
625 625 except IOError:
626 626 if self.create:
627 627 self.missing = False
628 628 if self.mode is None:
629 629 self.mode = (False, False)
630 630 if self.missing:
631 631 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
632 632
633 633 self.hash = {}
634 634 self.dirty = 0
635 635 self.offset = 0
636 636 self.skew = 0
637 637 self.rej = []
638 638 self.fileprinted = False
639 639 self.printfile(False)
640 640 self.hunks = 0
641 641
642 642 def writelines(self, fname, lines, mode):
643 643 if self.eolmode == 'auto':
644 644 eol = self.eol
645 645 elif self.eolmode == 'crlf':
646 646 eol = '\r\n'
647 647 else:
648 648 eol = '\n'
649 649
650 650 if self.eolmode != 'strict' and eol and eol != '\n':
651 651 rawlines = []
652 652 for l in lines:
653 653 if l and l[-1] == '\n':
654 654 l = l[:-1] + eol
655 655 rawlines.append(l)
656 656 lines = rawlines
657 657
658 658 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
659 659
660 660 def printfile(self, warn):
661 661 if self.fileprinted:
662 662 return
663 663 if warn or self.ui.verbose:
664 664 self.fileprinted = True
665 665 s = _("patching file %s\n") % self.fname
666 666 if warn:
667 667 self.ui.warn(s)
668 668 else:
669 669 self.ui.note(s)
670 670
671 671
672 672 def findlines(self, l, linenum):
673 673 # looks through the hash and finds candidate lines. The
674 674 # result is a list of line numbers sorted based on distance
675 675 # from linenum
676 676
677 677 cand = self.hash.get(l, [])
678 678 if len(cand) > 1:
679 679 # resort our list of potentials forward then back.
680 680 cand.sort(key=lambda x: abs(x - linenum))
681 681 return cand
682 682
683 683 def write_rej(self):
684 684 # our rejects are a little different from patch(1). This always
685 685 # creates rejects in the same form as the original patch. A file
686 686 # header is inserted so that you can run the reject through patch again
687 687 # without having to type the filename.
688 688 if not self.rej:
689 689 return
690 690 base = os.path.basename(self.fname)
691 691 lines = ["--- %s\n+++ %s\n" % (base, base)]
692 692 for x in self.rej:
693 693 for l in x.hunk:
694 694 lines.append(l)
695 695 if l[-1] != '\n':
696 696 lines.append("\n\ No newline at end of file\n")
697 697 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
698 698
699 699 def apply(self, h):
700 700 if not h.complete():
701 701 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
702 702 (h.number, h.desc, len(h.a), h.lena, len(h.b),
703 703 h.lenb))
704 704
705 705 self.hunks += 1
706 706
707 707 if self.missing:
708 708 self.rej.append(h)
709 709 return -1
710 710
711 711 if self.exists and self.create:
712 712 if self.copysource:
713 713 self.ui.warn(_("cannot create %s: destination already "
714 "exists\n" % self.fname))
714 "exists\n") % self.fname)
715 715 else:
716 716 self.ui.warn(_("file %s already exists\n") % self.fname)
717 717 self.rej.append(h)
718 718 return -1
719 719
720 720 if isinstance(h, binhunk):
721 721 if self.remove:
722 722 self.backend.unlink(self.fname)
723 723 else:
724 724 l = h.new(self.lines)
725 725 self.lines[:] = l
726 726 self.offset += len(l)
727 727 self.dirty = True
728 728 return 0
729 729
730 730 horig = h
731 731 if (self.eolmode in ('crlf', 'lf')
732 732 or self.eolmode == 'auto' and self.eol):
733 733 # If new eols are going to be normalized, then normalize
734 734 # hunk data before patching. Otherwise, preserve input
735 735 # line-endings.
736 736 h = h.getnormalized()
737 737
738 738 # fast case first, no offsets, no fuzz
739 739 old, oldstart, new, newstart = h.fuzzit(0, False)
740 740 oldstart += self.offset
741 741 orig_start = oldstart
742 742 # if there's skew we want to emit the "(offset %d lines)" even
743 743 # when the hunk cleanly applies at start + skew, so skip the
744 744 # fast case code
745 745 if (self.skew == 0 and
746 746 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
747 747 if self.remove:
748 748 self.backend.unlink(self.fname)
749 749 else:
750 750 self.lines[oldstart:oldstart + len(old)] = new
751 751 self.offset += len(new) - len(old)
752 752 self.dirty = True
753 753 return 0
754 754
755 755 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
756 756 self.hash = {}
757 757 for x, s in enumerate(self.lines):
758 758 self.hash.setdefault(s, []).append(x)
759 759
760 760 for fuzzlen in xrange(3):
761 761 for toponly in [True, False]:
762 762 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
763 763 oldstart = oldstart + self.offset + self.skew
764 764 oldstart = min(oldstart, len(self.lines))
765 765 if old:
766 766 cand = self.findlines(old[0][1:], oldstart)
767 767 else:
768 768 # Only adding lines with no or fuzzed context, just
769 769 # take the skew in account
770 770 cand = [oldstart]
771 771
772 772 for l in cand:
773 773 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
774 774 self.lines[l : l + len(old)] = new
775 775 self.offset += len(new) - len(old)
776 776 self.skew = l - orig_start
777 777 self.dirty = True
778 778 offset = l - orig_start - fuzzlen
779 779 if fuzzlen:
780 780 msg = _("Hunk #%d succeeded at %d "
781 781 "with fuzz %d "
782 782 "(offset %d lines).\n")
783 783 self.printfile(True)
784 784 self.ui.warn(msg %
785 785 (h.number, l + 1, fuzzlen, offset))
786 786 else:
787 787 msg = _("Hunk #%d succeeded at %d "
788 788 "(offset %d lines).\n")
789 789 self.ui.note(msg % (h.number, l + 1, offset))
790 790 return fuzzlen
791 791 self.printfile(True)
792 792 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
793 793 self.rej.append(horig)
794 794 return -1
795 795
796 796 def close(self):
797 797 if self.dirty:
798 798 self.writelines(self.fname, self.lines, self.mode)
799 799 self.write_rej()
800 800 return len(self.rej)
801 801
802 802 class hunk(object):
803 803 def __init__(self, desc, num, lr, context):
804 804 self.number = num
805 805 self.desc = desc
806 806 self.hunk = [desc]
807 807 self.a = []
808 808 self.b = []
809 809 self.starta = self.lena = None
810 810 self.startb = self.lenb = None
811 811 if lr is not None:
812 812 if context:
813 813 self.read_context_hunk(lr)
814 814 else:
815 815 self.read_unified_hunk(lr)
816 816
817 817 def getnormalized(self):
818 818 """Return a copy with line endings normalized to LF."""
819 819
820 820 def normalize(lines):
821 821 nlines = []
822 822 for line in lines:
823 823 if line.endswith('\r\n'):
824 824 line = line[:-2] + '\n'
825 825 nlines.append(line)
826 826 return nlines
827 827
828 828 # Dummy object, it is rebuilt manually
829 829 nh = hunk(self.desc, self.number, None, None)
830 830 nh.number = self.number
831 831 nh.desc = self.desc
832 832 nh.hunk = self.hunk
833 833 nh.a = normalize(self.a)
834 834 nh.b = normalize(self.b)
835 835 nh.starta = self.starta
836 836 nh.startb = self.startb
837 837 nh.lena = self.lena
838 838 nh.lenb = self.lenb
839 839 return nh
840 840
841 841 def read_unified_hunk(self, lr):
842 842 m = unidesc.match(self.desc)
843 843 if not m:
844 844 raise PatchError(_("bad hunk #%d") % self.number)
845 845 self.starta, self.lena, self.startb, self.lenb = m.groups()
846 846 if self.lena is None:
847 847 self.lena = 1
848 848 else:
849 849 self.lena = int(self.lena)
850 850 if self.lenb is None:
851 851 self.lenb = 1
852 852 else:
853 853 self.lenb = int(self.lenb)
854 854 self.starta = int(self.starta)
855 855 self.startb = int(self.startb)
856 856 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
857 857 self.b)
858 858 # if we hit eof before finishing out the hunk, the last line will
859 859 # be zero length. Lets try to fix it up.
860 860 while len(self.hunk[-1]) == 0:
861 861 del self.hunk[-1]
862 862 del self.a[-1]
863 863 del self.b[-1]
864 864 self.lena -= 1
865 865 self.lenb -= 1
866 866 self._fixnewline(lr)
867 867
868 868 def read_context_hunk(self, lr):
869 869 self.desc = lr.readline()
870 870 m = contextdesc.match(self.desc)
871 871 if not m:
872 872 raise PatchError(_("bad hunk #%d") % self.number)
873 873 self.starta, aend = m.groups()
874 874 self.starta = int(self.starta)
875 875 if aend is None:
876 876 aend = self.starta
877 877 self.lena = int(aend) - self.starta
878 878 if self.starta:
879 879 self.lena += 1
880 880 for x in xrange(self.lena):
881 881 l = lr.readline()
882 882 if l.startswith('---'):
883 883 # lines addition, old block is empty
884 884 lr.push(l)
885 885 break
886 886 s = l[2:]
887 887 if l.startswith('- ') or l.startswith('! '):
888 888 u = '-' + s
889 889 elif l.startswith(' '):
890 890 u = ' ' + s
891 891 else:
892 892 raise PatchError(_("bad hunk #%d old text line %d") %
893 893 (self.number, x))
894 894 self.a.append(u)
895 895 self.hunk.append(u)
896 896
897 897 l = lr.readline()
898 898 if l.startswith('\ '):
899 899 s = self.a[-1][:-1]
900 900 self.a[-1] = s
901 901 self.hunk[-1] = s
902 902 l = lr.readline()
903 903 m = contextdesc.match(l)
904 904 if not m:
905 905 raise PatchError(_("bad hunk #%d") % self.number)
906 906 self.startb, bend = m.groups()
907 907 self.startb = int(self.startb)
908 908 if bend is None:
909 909 bend = self.startb
910 910 self.lenb = int(bend) - self.startb
911 911 if self.startb:
912 912 self.lenb += 1
913 913 hunki = 1
914 914 for x in xrange(self.lenb):
915 915 l = lr.readline()
916 916 if l.startswith('\ '):
917 917 # XXX: the only way to hit this is with an invalid line range.
918 918 # The no-eol marker is not counted in the line range, but I
919 919 # guess there are diff(1) out there which behave differently.
920 920 s = self.b[-1][:-1]
921 921 self.b[-1] = s
922 922 self.hunk[hunki - 1] = s
923 923 continue
924 924 if not l:
925 925 # line deletions, new block is empty and we hit EOF
926 926 lr.push(l)
927 927 break
928 928 s = l[2:]
929 929 if l.startswith('+ ') or l.startswith('! '):
930 930 u = '+' + s
931 931 elif l.startswith(' '):
932 932 u = ' ' + s
933 933 elif len(self.b) == 0:
934 934 # line deletions, new block is empty
935 935 lr.push(l)
936 936 break
937 937 else:
938 938 raise PatchError(_("bad hunk #%d old text line %d") %
939 939 (self.number, x))
940 940 self.b.append(s)
941 941 while True:
942 942 if hunki >= len(self.hunk):
943 943 h = ""
944 944 else:
945 945 h = self.hunk[hunki]
946 946 hunki += 1
947 947 if h == u:
948 948 break
949 949 elif h.startswith('-'):
950 950 continue
951 951 else:
952 952 self.hunk.insert(hunki - 1, u)
953 953 break
954 954
955 955 if not self.a:
956 956 # this happens when lines were only added to the hunk
957 957 for x in self.hunk:
958 958 if x.startswith('-') or x.startswith(' '):
959 959 self.a.append(x)
960 960 if not self.b:
961 961 # this happens when lines were only deleted from the hunk
962 962 for x in self.hunk:
963 963 if x.startswith('+') or x.startswith(' '):
964 964 self.b.append(x[1:])
965 965 # @@ -start,len +start,len @@
966 966 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
967 967 self.startb, self.lenb)
968 968 self.hunk[0] = self.desc
969 969 self._fixnewline(lr)
970 970
971 971 def _fixnewline(self, lr):
972 972 l = lr.readline()
973 973 if l.startswith('\ '):
974 974 diffhelpers.fix_newline(self.hunk, self.a, self.b)
975 975 else:
976 976 lr.push(l)
977 977
978 978 def complete(self):
979 979 return len(self.a) == self.lena and len(self.b) == self.lenb
980 980
    def _fuzzit(self, old, new, fuzz, toponly):
        """Trim up to 'fuzz' leading/trailing context lines from old/new.

        Returns (old', new', top) where 'top' is the number of lines
        removed from the top. Only runs of context (' ') lines in the
        hunk are eligible for removal.
        """
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                # count trailing context lines the same way
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # never remove more than 'fuzz' lines from either end
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0
1007 1007
1008 1008 def fuzzit(self, fuzz, toponly):
1009 1009 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1010 1010 oldstart = self.starta + top
1011 1011 newstart = self.startb + top
1012 1012 # zero length hunk ranges already have their start decremented
1013 1013 if self.lena and oldstart > 0:
1014 1014 oldstart -= 1
1015 1015 if self.lenb and newstart > 0:
1016 1016 newstart -= 1
1017 1017 return old, oldstart, new, newstart
1018 1018
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been read and decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the patched content as a single-element list."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal'/'delta' base85-encoded section from lr."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal NNN' or 'delta NNN' size header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes this line's decoded length:
            # 'A'-'Z' -> 1..26, otherwise 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1074 1074
def parsefilename(line):
    """Extract the filename from a '--- ' / '+++ ' diff header line.

    The first four characters (marker plus space) are skipped; the name
    ends at the first tab, or the first space if there is no tab, or the
    end of line. Note: the parameter was renamed from 'str', which
    shadowed the builtin; all known callers pass it positionally.
    """
    # --- filename \t|space stuff
    s = line[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]
1084 1084
def pathstrip(path, strip):
    """Strip 'strip' leading directory components from path.

    Returns (stripped_prefix, remainder). Raises PatchError when path
    has fewer than 'strip' components.
    """
    if strip == 0:
        return '', path.rstrip()
    total = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow any run of consecutive slashes as a single separator
        while pos < total - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
1102 1102
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Synthesize a patchmeta (op ADD/DELETE or plain modify) for a
    non-git hunk, deciding which file it targets from the stripped
    a/b paths and their existence in the backend."""
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # an all-zero old range on /dev/null means file creation (and
    # symmetrically, removal)
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1151 1151
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # the stream is not seekable (e.g. a pipe): buffer the whole
        # input so it can be rewound after the metadata scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch body from where it was
    fp.seek(pos)
    return gitpatches
1177 1177
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor not known yet; True: context diff; False: unified
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new target: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries for files we skipped over
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries (no hunks followed)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1276 1276
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    'binchunk' is the delta stream; 'data' is the source content the
    delta's copy opcodes reference. Returns the reconstructed content.
    Output fragments are collected in a list and joined once, instead
    of the quadratic repeated string concatenation.
    """
    def deltahead(binchunk):
        # skip one base-128 varint (the high bit marks continuation)
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip source-size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip target-size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy opcode: low flag bits select which offset/size
            # bytes follow, little-endian
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            offset_end = offset + size
            out.append(data[offset:offset_end])
        elif cmd != 0:
            # insert opcode: the next 'cmd' bytes are literal data
            offset_end = i + cmd
            out.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(out)
1332 1332
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic loop using the default per-file patcher
    # class (patchfile)
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      eolmode=eolmode)
1345 1345
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    """Core apply loop: consume iterhunks() events read from fp.

    'patcher' is the class instantiated to patch one target file.
    Returns 0 on a clean apply, 1 if there was fuzz, -1 if any hunks
    were rejected.
    """

    def pstrip(p):
        # strip - 1 because git paths carry no a/ b/ prefix here
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                # finalize the previous target before switching
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: create/delete/copy without hunks
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                    if data or mode:
                        if (gp.op in ('ADD', 'RENAME', 'COPY')
                            and backend.exists(gp.path)):
                            raise PatchError(_("cannot create %s: destination "
                                               "already exists") % gp.path)
                        backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1427 1427
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    'files' is updated in place with the paths the external command
    reports as patched. Raises PatchError when the command exits
    non-zero.
    """

    fuzz = False
    args = []
    # initialize before the loop so a "with fuzz"/"FAILED" report that
    # precedes any "patching file" line cannot hit unbound locals
    pf = patchname
    printed_file = False
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
1469 1469
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply 'patchobj' through the given backend.

    'patchobj' may be a file path or an already-open file object.
    'files' (a set) is updated with the files the backend touched.
    Returns True when the patch applied with fuzz, False on a clean
    apply; raises PatchError when hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # a path was given; open() raises TypeError for file objects
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        # only close files we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1495 1495
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity),
                        patchobj, strip, files, eolmode)
1502 1502
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply patchobj to repo content through a repobackend bound to
    ctx/store; returns whether the patch applied with fuzz."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store),
                        patchobj, strip, files, eolmode)
1507 1507
def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # a configured 'ui.patch' command takes precedence over the builtin
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    try:
        if patcher:
            return _externalpatch(ui, repo, patcher, patchname, strip,
                                  files, similarity)
        return internalpatch(ui, repo, patchname, strip, files, eolmode,
                             similarity)
    except PatchError, err:
        raise util.Abort(str(err))
1532 1532
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths the patch at 'patchpath' touches,
    including rename sources, without applying anything."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1555 1555
class GitDiffRequired(Exception):
    """Raised to signal that diff output must switch to git format."""
    pass
1558 1558
def diffopts(ui, opts=None, untrusted=False, section='diff'):
    """Build mdiff.diffopts from command options and config 'section'.

    Values present in 'opts' (command line) win over config values.
    """
    def get(key, name=None, getter=ui.configbool):
        # command-line option first, then config (boolean by default)
        return ((opts and opts.get(key)) or
                getter(section, name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1572 1572
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (20 entries) of filelogs, so files that show
        # up repeatedly do not reopen their filelog each time
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change could not be represented; redo in git format
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1647 1647
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for header lines (between file sections)
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for hunk body lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # track whether we are inside a file header: it starts at a
            # line that is not hunk content and ends at the next '@' line
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            if not head and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
1688 1688
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # convenience wrapper: run diff() through difflabel() for labeling
    return difflabel(diff, *args, **kw)
1692 1692
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    """Generate diff text chunks for the given file lists.

    Yields header strings and diff text per file. When a change cannot
    be represented in plain (non-git) format, losedatafn(f) is invoked
    so the caller may upgrade to git format.
    """

    def join(f):
        return posixpath.join(prefix, f)

    def addmodehdr(header, omode, nmode):
        # only emit mode lines when the mode actually changed
        if omode != nmode:
            header.append('old mode %s\n' % omode)
            header.append('new mode %s\n' % nmode)

    def addindexmeta(meta, revs):
        if opts.git:
            i = len(revs)
            if i==2:
                meta.append('index %s..%s\n' % tuple(revs))
            elif i==3:
                meta.append('index %s,%s..%s\n' % tuple(revs))

    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def diffline(a, b, revs):
        if opts.git:
            line = 'diff --git a/%s b/%s\n' % (a, b)
        elif not repo.ui.quiet:
            if revs:
                revinfo = ' '.join(["-r %s" % rev for rev in revs])
                line = 'diff %s %s\n' % (revinfo, a)
            else:
                line = 'diff %s\n' % a
        else:
            line = ''
        return line

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # rename sources already reported, to avoid a second 'rename from'
    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping of the copy dict: destination -> source
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added or (f in modified and to is None):
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed or (f in modified and tn is None):
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                        if util.binary(to):
                            dodiff = 'binary'
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)

        if dodiff:
            if opts.git or revs:
                header.insert(0, diffline(join(a), join(b), revs))
            if dodiff == 'binary':
                text = mdiff.b85diff(to, tn)
                if text:
                    addindexmeta(header, [gitindex(to), gitindex(tn)])
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), opts=opts)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1834 1834
def diffstatsum(stats):
    """Aggregate diffstatdata() tuples.

    Returns (maxfilewidth, maxtotal, addtotal, removetotal, hasbinary).
    """
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for fname, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(fname))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
1845 1845
def diffstatdata(lines):
    """Parse diff output lines into a list of per-file tuples
    (filename, adds, removes, isbinary)."""
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname = None
    nadd = 0
    nrem = 0
    isbin = False

    def flush():
        if fname:
            results.append((fname, nadd, nrem, isbin))

    for l in lines:
        if l.startswith('diff'):
            # new file section: record the previous one, reset counters
            flush()
            nadd, nrem, isbin = 0, 0, False
            if l.startswith('diff --git a/'):
                fname = gitre.search(l).group(1)
            elif l.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(l).group(1)
        elif l.startswith('+') and not l.startswith('+++ '):
            nadd += 1
        elif l.startswith('-') and not l.startswith('--- '):
            nrem += 1
        elif (l.startswith('GIT binary patch') or
              l.startswith('Binary file')):
            isbin = True
    flush()
    return results
1875 1875
def diffstat(lines, width=80, git=False):
    """Render a classic diffstat histogram from diff output lines.

    NOTE(review): the 'git' parameter is not used in this body —
    presumably kept for interface compatibility; confirm with callers.
    """
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # make room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1913 1913
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: emit the name part unlabeled, then the
            # '+' and '-' runs with their diffstat labels
            head, bars = row.rsplit(' ', 1)
            yield (head + ' ', '')
            plus = re.search(r'\++', bars)
            if plus:
                yield (plus.group(0), 'diffstat.inserted')
            minus = re.search(r'-+', bars)
            if minus:
                yield (minus.group(0), 'diffstat.deleted')
        else:
            yield (row, '')
        yield ('\n', '')
@@ -1,1537 +1,1537 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, shutil, posixpath, sys
9 9 import xml.dom.minidom
10 10 import stat, subprocess, tarfile
11 11 from i18n import _
12 12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
13 13 import phases
14 14 import pathutil
15 15 hg = None
16 16 propertycache = util.propertycache
17 17
18 18 nullstate = ('', '', 'empty')
19 19
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    u = util.url(expanded)
    if u.scheme:
        # a real URL: leave the caller's value untouched
        return path
    return util.normpath(os.path.abspath(u.path))
29 29
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    # the first 12 hex digits of the sha1 of the normalized path
    digest = util.sha1(_expandedabspath(remotepath)).hexdigest()
    return digest[:12]
33 33
def _calcfilehash(filename):
    """Return the hex sha1 of filename's content.

    A missing file hashes as the empty string, so callers can compare
    hashes without first checking for existence.
    """
    data = ''
    if os.path.exists(filename):
        fd = open(filename, 'rb')
        # close the handle even if read() fails (previously leaked)
        try:
            data = fd.read()
        finally:
            fd.close()
    return util.sha1(data).hexdigest()
41 41
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kwargs):
        error.Abort.__init__(self, *args, **kwargs)
        # remember which subrepo failed and the original exc_info so
        # outer layers know this error was already reported
        self.subrepo = kwargs.get('subrepo')
        self.cause = kwargs.get('cause')
48 48
49 49 def annotatesubrepoerror(func):
50 50 def decoratedmethod(self, *args, **kargs):
51 51 try:
52 52 res = func(self, *args, **kargs)
53 53 except SubrepoAbort, ex:
54 54 # This exception has already been handled
55 55 raise ex
56 56 except error.Abort, ex:
57 57 subrepo = subrelpath(self)
58 58 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
59 59 # avoid handling this exception by raising a SubrepoAbort exception
60 60 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
61 61 cause=sys.exc_info())
62 62 return res
63 63 return decoratedmethod
64 64
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    def read(f, sections=None, remap=None):
        # parse a config file stored in the context; 'read' is handed back
        # to p.parse so %include directives resolve against ctx as well
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    # [subpaths] entries from the user configuration override the file
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # map subrepo path -> pinned revision, from .hgsubstate
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise util.Abort(_("invalid subrepository revision "
                                       "specifier in .hgsubstate line %d")
                                     % (i + 1))
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply each [subpaths] pattern (once) to rewrite the source URL
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            # '[kind]source' syntax selects a non-hg subrepo type
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
154 154
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one '<revision> <path>' line per subrepo, sorted by path
    lines = []
    for s in sorted(state):
        lines.append('%s %s\n' % (state[s][1], s))
    repo.wwrite('.hgsubstate', ''.join(lines), '')
159 159
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    # substate dicts: local (working), other (merged-in), ancestor
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}  # the merged result, written back at the end

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: subrepos present locally
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed")
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, l[1][:12], r[1][:12]), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the other side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
259 259
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask the user which source of a divergent subrepo to use.

    Returns the ui.promptchoice() index: 0 for local, 1 for remote.
    """
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
273 273
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # climb the _subparent chain to the outermost repository
    outer = repo
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    # strip '<outer root>/' off the front of this repo's root
    prefix = outer.root.rstrip(os.sep)
    return repo.root[len(prefix) + 1:]
281 281
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if util.safehasattr(sub, '_repo'):
        return reporelpath(sub._repo)
    # no repo object (e.g. non-hg subrepo types): fall back to raw path
    return sub._path
289 289
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        # we are a subrepo: resolve our source relative to the parent's
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        # fall back to the configured paths, preferring default-push
        # when pushing
        keys = ['default']
        if push:
            keys.insert(0, 'default-push')
        for key in keys:
            value = repo.ui.config('paths', key)
            if value:
                return value
        if repo.sharedpath != repo.path:
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise util.Abort(_("default path for subrepository not found"))
316 316
317 317 def _sanitize(ui, path):
318 318 def v(arg, dirname, names):
319 319 if os.path.basename(dirname).lower() != '.hg':
320 320 return
321 321 for f in names:
322 322 if f.lower() == 'hgrc':
323 323 ui.warn(
324 324 _("warning: removing potentially hostile .hg/hgrc in '%s'")
325 325 % path)
326 326 os.unlink(os.path.join(dirname, f))
327 327 os.walk(path, v, None)
328 328
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for p in ctx2.substate:
        subpaths[p] = ctx2
    for p in ctx1.substate:
        subpaths[p] = ctx1
    for subpath in sorted(subpaths):
        yield subpath, subpaths[subpath].sub(subpath)
338 338
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse paths that would escape the working directory
    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    # state[2] is the subrepo kind ('hg', 'svn', 'git'); look up its class
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])
354 354
def newcommitphase(ui, ctx):
    """Pick the phase for a new commit of ctx, honoring subrepo phases.

    With phases.checksubrepos = 'follow' (the default) the commit phase
    is raised to the highest subrepo phase; 'abort' refuses the commit
    instead; 'ignore' keeps the configured new-commit phase.
    """
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
                         % (check))
    if check == 'ignore':
        return commitphase
    # find the least-public phase among all subrepo revisions
    maxphase, maxsub = phases.public, None
    for s in sorted(substate):
        subphase = ctx.sub(s).phase(substate[s][1])
        if subphase > maxphase:
            maxphase, maxsub = subphase, s
    if commitphase >= maxphase:
        return commitphase
    if check == 'abort':
        raise util.Abort(_("can't commit in %s phase"
                           " conflicting %s from subrepository %s") %
                         (phases.phasenames[commitphase],
                          phases.phasenames[maxphase], maxsub))
    ui.warn(_("warning: changes are committed in"
              " %s phase from subrepository %s\n") %
            (phases.phasenames[maxphase], maxsub))
    return maxphase
385 385
386 386 # subrepo classes need to implement the following abstract class:
387 387
class abstractsubrepo(object):
    # Abstract interface every subrepo type class (hg, svn, git) must
    # implement. Methods raising NotImplementedError are mandatory;
    # the others provide safe do-nothing defaults.

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        # schedule files for addition; default: nothing to add
        return []

    def status(self, rev2, **opts):
        # 7-tuple of status file lists; default: everything clean
        return [], [], [], [], [], [], []

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        # write a diff of the subrepo to ui; default: no output
        pass

    def outgoing(self, ui, dest, opts):
        # 1 follows the command exit-code convention: nothing outgoing
        return 1

    def incoming(self, ui, source, opts):
        # 1 follows the command exit-code convention: nothing incoming
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix, match=None):
        # add this subrepo's files to an archive, honoring 'match' if given;
        # returns the number of files archived
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # executable flag maps to 0755, everything else to 0644
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        # (forgotten, failed) file lists; default: nothing to forget
        return ([], [])

    def revert(self, ui, substate, *pats, **opts):
        ui.warn('%s: reverting %s subrepos is unsupported\n' \
                % (substate[0], substate[2]))
        return []
510 510
class hgsubrepo(abstractsubrepo):
    # A Mercurial subrepository: a nested hg repository checked out at
    # self._path inside the parent repository's working directory.

    def __init__(self, ctx, path, state):
        # state is (source URL, revision) as parsed by state()
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            # first use: create the nested repository on disk
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.baseui, root, create=create)
        # propagate selected parent settings into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self._repo.ui.setconfig(s, k, v)
        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        # compare the cached store hash for 'path' line by line against a
        # freshly computed one; any mismatch or length difference is dirty
        clean = True
        lock = self._repo.lock()
        itercache = self._calcstorehash(path)
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        lock.release()
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        for relname in filelist:
            absname = os.path.normpath(self._repo.join(relname))
            yield '%s = %s\n' % (relname, _calcfilehash(absname))

    def _getstorehashcachepath(self, remotepath):
        '''get a unique path for the store hash cache'''
        return self._repo.join(os.path.join(
            'cache', 'storehash', _getstorehashcachename(remotepath)))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = self._getstorehashcachepath(remotepath)
        if not os.path.exists(cachefile):
            return ''
        fd = open(cachefile, 'r')
        pullstate = fd.readlines()
        fd.close()
        return pullstate

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = self._getstorehashcachepath(remotepath)
        lock = self._repo.lock()
        storehash = list(self._calcstorehash(remotepath))
        cachedir = os.path.dirname(cachefile)
        if not os.path.exists(cachedir):
            util.makedirs(cachedir, notindexed=True)
        fd = open(cachefile, 'w')
        fd.writelines(storehash)
        fd.close()
        lock.release()

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        # link the subrepo back to its parent and record where it came from
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            # seed .hg/hgrc with [paths] so plain hg commands work inside
            fp = self._repo.opener("hgrc", "w", text=True)
            fp.write('[paths]\n')

            def addpathconfig(key, value):
                if value:
                    fp.write('%s = %s\n' % (key, value))
                    self._repo.ui.setconfig('paths', key, value)

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        # status between the recorded revision and rev2; lookup failures
        # degrade to a warning plus an all-clean result
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))
            return [], [], [], [], [], [], []

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, ui, archiver, prefix, match=None):
        # make sure the recorded revision is present, archive it, then
        # recurse into nested subrepos
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                ui, archiver, os.path.join(prefix, self._path), submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        # make sure 'revision' is present locally, cloning or pulling from
        # the source as needed; returns True if it was already present
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty shell repo: replace it with a real clone of the source
            self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            shutil.rmtree(self._repo.path)
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            remotebookmarks = other.listkeys('bookmarks')
            self._repo.pull(other)
            bookmarks.updatefromremote(self._repo.ui, self._repo,
                                       remotebookmarks, srcurl)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                # update through the unfiltered repo so hidden revisions
                # can still be checked out (with a warning)
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # local changes + divergence: ask before merging
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = self._repo.push(other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, ui, match, prefix):
        return cmdutil.forget(ui, self._repo, match,
                              os.path.join(prefix, self._path), True)

    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        # files inside the subrepo
        # 2. update the subrepo to the revision specified in
        # the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)

    def filerevert(self, ui, *pats, **opts):
        # revert files to opts['rev'] relative to the current dirstate parents
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)
869 869
870 870 class svnsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        # state is (source URL, revision) as parsed from .hgsub/.hgsubstate
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        # require a working 'svn' binary up front so later commands can run
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)
880 880
    def _svncommand(self, commands, filename='', failok=False):
        """Run 'svn <commands> [<path>]' and return (stdout, stderr).

        filename=None omits the path argument entirely; failok suppresses
        aborting on a non-zero exit code or warning on stderr output.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr
916 916
917 917 @propertycache
918 918 def _svnversion(self):
919 919 output, err = self._svncommand(['--version', '--quiet'], filename=None)
920 920 m = re.search(r'^(\d+)\.(\d+)', output)
921 921 if not m:
922 922 raise util.Abort(_('cannot retrieve svn tool version'))
923 923 return (int(m.group(1)), int(m.group(2)))
924 924
925 925 def _wcrevs(self):
926 926 # Get the working directory revision as well as the last
927 927 # commit revision so we can compare the subrepo state with
928 928 # both. We used to store the working directory one.
929 929 output, err = self._svncommand(['info', '--xml'])
930 930 doc = xml.dom.minidom.parseString(output)
931 931 entries = doc.getElementsByTagName('entry')
932 932 lastrev, rev = '0', '0'
933 933 if entries:
934 934 rev = str(entries[0].getAttribute('revision')) or '0'
935 935 commits = entries[0].getElementsByTagName('commit')
936 936 if commits:
937 937 lastrev = str(commits[0].getAttribute('revision')) or '0'
938 938 return (lastrev, rev)
939 939
940 940 def _wcrev(self):
941 941 return self._wcrevs()[0]
942 942
943 943 def _wcchanged(self):
944 944 """Return (changes, extchanges, missing) where changes is True
945 945 if the working directory was changed, extchanges is
946 946 True if any of these changes concern an external entry and missing
947 947 is True if any change is a missing entry.
948 948 """
949 949 output, err = self._svncommand(['status', '--xml'])
950 950 externals, changes, missing = [], [], []
951 951 doc = xml.dom.minidom.parseString(output)
952 952 for e in doc.getElementsByTagName('entry'):
953 953 s = e.getElementsByTagName('wc-status')
954 954 if not s:
955 955 continue
956 956 item = s[0].getAttribute('item')
957 957 props = s[0].getAttribute('props')
958 958 path = e.getAttribute('path')
959 959 if item == 'external':
960 960 externals.append(path)
961 961 elif item == 'missing':
962 962 missing.append(path)
963 963 if (item not in ('', 'normal', 'unversioned', 'external')
964 964 or props not in ('', 'none', 'normal')):
965 965 changes.append(path)
966 966 for path in changes:
967 967 for ext in externals:
968 968 if path == ext or path.startswith(ext + os.sep):
969 969 return True, True, bool(missing)
970 970 return bool(changes), False, bool(missing)
971 971
972 972 def dirty(self, ignoreupdate=False):
973 973 if not self._wcchanged()[0]:
974 974 if self._state[1] in self._wcrevs() or ignoreupdate:
975 975 return False
976 976 return True
977 977
978 978 def basestate(self):
979 979 lastrev, rev = self._wcrevs()
980 980 if lastrev != rev:
981 981 # Last committed rev is not the same than rev. We would
982 982 # like to take lastrev but we do not know if the subrepo
983 983 # URL exists at lastrev. Test it and fallback to rev it
984 984 # is not there.
985 985 try:
986 986 self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
987 987 return lastrev
988 988 except error.Abort:
989 989 pass
990 990 return rev
991 991
992 992 @annotatesubrepoerror
993 993 def commit(self, text, user, date):
994 994 # user and date are out of our hands since svn is centralized
995 995 changed, extchanged, missing = self._wcchanged()
996 996 if not changed:
997 997 return self.basestate()
998 998 if extchanged:
999 999 # Do not try to commit externals
1000 1000 raise util.Abort(_('cannot commit svn externals'))
1001 1001 if missing:
1002 1002 # svn can commit with missing entries but aborting like hg
1003 1003 # seems a better approach.
1004 1004 raise util.Abort(_('cannot commit missing svn entries'))
1005 1005 commitinfo, err = self._svncommand(['commit', '-m', text])
1006 1006 self._ui.status(commitinfo)
1007 1007 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
1008 1008 if not newrev:
1009 1009 if not commitinfo.strip():
1010 1010 # Sometimes, our definition of "changed" differs from
1011 1011 # svn one. For instance, svn ignores missing files
1012 1012 # when committing. If there are only missing files, no
1013 1013 # commit is made, no output and no error code.
1014 1014 raise util.Abort(_('failed to commit svn changes'))
1015 1015 raise util.Abort(commitinfo.splitlines()[-1])
1016 1016 newrev = newrev.groups()[0]
1017 1017 self._ui.status(self._svncommand(['update', '-r', newrev])[0])
1018 1018 return newrev
1019 1019
1020 1020 @annotatesubrepoerror
1021 1021 def remove(self):
1022 1022 if self.dirty():
1023 1023 self._ui.warn(_('not removing repo %s because '
1024 'it has changes.\n' % self._path))
1024 'it has changes.\n') % self._path)
1025 1025 return
1026 1026 self._ui.note(_('removing subrepo %s\n') % self._path)
1027 1027
1028 1028 def onerror(function, path, excinfo):
1029 1029 if function is not os.remove:
1030 1030 raise
1031 1031 # read-only files cannot be unlinked under Windows
1032 1032 s = os.stat(path)
1033 1033 if (s.st_mode & stat.S_IWRITE) != 0:
1034 1034 raise
1035 1035 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
1036 1036 os.remove(path)
1037 1037
1038 1038 path = self._ctx._repo.wjoin(self._path)
1039 1039 shutil.rmtree(path, onerror=onerror)
1040 1040 try:
1041 1041 os.removedirs(os.path.dirname(path))
1042 1042 except OSError:
1043 1043 pass
1044 1044
1045 1045 @annotatesubrepoerror
1046 1046 def get(self, state, overwrite=False):
1047 1047 if overwrite:
1048 1048 self._svncommand(['revert', '--recursive'])
1049 1049 args = ['checkout']
1050 1050 if self._svnversion >= (1, 5):
1051 1051 args.append('--force')
1052 1052 # The revision must be specified at the end of the URL to properly
1053 1053 # update to a directory which has since been deleted and recreated.
1054 1054 args.append('%s@%s' % (state[0], state[1]))
1055 1055 status, err = self._svncommand(args, failok=True)
1056 1056 _sanitize(self._ui, self._path)
1057 1057 if not re.search('Checked out revision [0-9]+.', status):
1058 1058 if ('is already a working copy for a different URL' in err
1059 1059 and (self._wcchanged()[:2] == (False, False))):
1060 1060 # obstructed but clean working copy, so just blow it away.
1061 1061 self.remove()
1062 1062 self.get(state, overwrite=False)
1063 1063 return
1064 1064 raise util.Abort((status or err).splitlines()[-1])
1065 1065 self._ui.status(status)
1066 1066
1067 1067 @annotatesubrepoerror
1068 1068 def merge(self, state):
1069 1069 old = self._state[1]
1070 1070 new = state[1]
1071 1071 wcrev = self._wcrev()
1072 1072 if new != wcrev:
1073 1073 dirty = old == wcrev or self._wcchanged()[0]
1074 1074 if _updateprompt(self._ui, self, dirty, wcrev, new):
1075 1075 self.get(state, False)
1076 1076
1077 1077 def push(self, opts):
1078 1078 # push is a no-op for SVN
1079 1079 return True
1080 1080
1081 1081 @annotatesubrepoerror
1082 1082 def files(self):
1083 1083 output = self._svncommand(['list', '--recursive', '--xml'])[0]
1084 1084 doc = xml.dom.minidom.parseString(output)
1085 1085 paths = []
1086 1086 for e in doc.getElementsByTagName('entry'):
1087 1087 kind = str(e.getAttribute('kind'))
1088 1088 if kind != 'file':
1089 1089 continue
1090 1090 name = ''.join(c.data for c
1091 1091 in e.getElementsByTagName('name')[0].childNodes
1092 1092 if c.nodeType == c.TEXT_NODE)
1093 1093 paths.append(name.encode('utf-8'))
1094 1094 return paths
1095 1095
1096 1096 def filedata(self, name):
1097 1097 return self._svncommand(['cat'], name)[0]
1098 1098
1099 1099
class gitsubrepo(abstractsubrepo):
    """Subrepository backed by a git repository, driven through the git
    command-line client."""
    def __init__(self, ctx, path, state):
        # state is presumably the (source, revision, kind) tuple from the
        # parent's substate -- see types mapping at module end; confirm
        self._state = state
        self._ctx = ctx
        self._path = path
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        self._ui = ctx._repo.ui
        self._ensuregit()

    def _ensuregit(self):
        """Find a runnable git executable and sanity-check its version."""
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError, e:
            # errno 2 is ENOENT; on Windows fall back to the 'git.cmd'
            # wrapper script some installers provide
            if e.errno != 2 or os.name != 'nt':
                raise
            self._gitexecutable = 'git.cmd'
            out, err = self._gitnodir(['--version'])
        m = re.search(r'^git version (\d+)\.(\d+)', out)
        if not m:
            self._ui.warn(_('cannot retrieve git version\n'))
            return
        version = (int(m.group(1)), int(m.group(2)))
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment. For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        if version < (1, 5):
            raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif version < (1, 6):
            self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))

    def _gitcommand(self, commands, env=None, stream=False):
        # convenience wrapper: run in the subrepo dir, return stdout only
        return self._gitdir(commands, env=env, stream=stream)[0]

    def _gitdir(self, commands, env=None, stream=False):
        # run git with cwd set to the subrepo working directory
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)

    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Call the git command and return (output, returncode).

        With stream=True, return (stdout pipe, None) instead of reading
        the output. git versions prior to 1.6.0 are not supported and
        very probably fail.
        """
        self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self._ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise util.Abort('git %s error %d in %s' %
                             (command, p.returncode, self._relpath))

        return retdata, p.returncode

    def _gitmissing(self):
        # True when the subrepo has never been cloned (no .git present)
        return not os.path.exists(os.path.join(self._abspath, '.git'))

    def _gitstate(self):
        # full hash of the currently checked-out commit
        return self._gitcommand(['rev-parse', 'HEAD'])

    def _gitcurrentbranch(self):
        # symbolic ref of HEAD, or None when HEAD is detached
        current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
        if err:
            current = None
        return current

    def _gitremote(self, remote):
        # URL of the named remote, parsed from 'git remote show -n'
        out = self._gitcommand(['remote', 'show', '-n', remote])
        line = out.split('\n')[1]
        i = line.index('URL: ') + len('URL: ')
        return line[i:]

    def _githavelocally(self, revision):
        # True if the object for *revision* exists in the local repo
        out, code = self._gitdir(['cat-file', '-e', revision])
        return code == 0

    def _gitisancestor(self, r1, r2):
        # True if r1 is an ancestor of r2 (merge-base of the two is r1)
        base = self._gitcommand(['merge-base', r1, r2])
        return base == r1

    def _gitisbare(self):
        return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'

    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        self._gitcommand(['update-index', '-q', '--refresh'])

    def _gitbranchmap(self):
        '''returns 2 things:
        a map from git branch to revision
        a map from revision to branches'''
        branch2rev = {}
        rev2branch = {}

        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch

    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking

    def _abssource(self, source):
        """Resolve *source* to an absolute source URL/path."""
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)

    def _fetch(self, source, revision):
        """Clone if needed, then make sure *revision* exists locally,
        fetching from origin as a last resort."""
        if self._gitmissing():
            source = self._abssource(source)
            self._ui.status(_('cloning subrepo %s from %s\n') %
                            (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self._ui.status(_('pulling subrepo %s from %s\n') %
                        (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                               (revision, self._relpath))

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        """True if the checkout differs from the recorded state or has
        staged/modified files (untracked files are ignored)."""
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1

    def basestate(self):
        # revision to record in .hgsubstate
        return self._gitstate()

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the checkout to state = (source, revision, kind),
        preferring a local/tracking branch checkout over detached HEAD."""
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout', forcing with -f when overwriting
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self._ui, self._path)

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                          self._relpath)
            self._ui.warn(_('check out a git branch if you intend '
                            'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()

    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit all changes with 'git commit -a'; return the new HEAD."""
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()

    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision in *state* into the checkout, prompting when
        the working tree is dirty; fast-forward when possible."""
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
            _sanitize(self._ui, self._path)

        if self.dirty():
            if self._gitstate() != revision:
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self._ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        """Push the recorded revision to origin if it is not already there;
        return True on success/no-op, False when nothing can be pushed."""
        force = opts.get('force')

        if not self._state[1]:
            return True
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self._ui.warn(_('unrelated git branch checked out '
                                'in subrepo %s\n') % self._relpath)
                return False
            self._ui.status(_('pushing branch %s of subrepo %s\n') %
                            (current.split('/', 2)[2], self._relpath))
            self._gitcommand(cmd + ['origin', current])
            return True
        else:
            self._ui.warn(_('no branch checked out in subrepo %s\n'
                            'cannot push revision %s\n') %
                          (self._relpath, self._state[1]))
            return False

    @annotatesubrepoerror
    def remove(self):
        """Make the repo bare and delete the working tree (local-only
        history is preserved), unless there are local changes."""
        if self._gitmissing():
            return
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._relpath)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self._ui.note(_('removing subrepo %s\n') % self._relpath)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f in os.listdir(self._abspath):
            if f == '.git':
                continue
            path = os.path.join(self._abspath, f)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

    def archive(self, ui, archiver, prefix, match=None):
        """Add this subrepo's files at the recorded revision to *archiver*;
        return the number of files archived."""
        total = 0
        source, revision = self._state
        if not revision:
            return total
        self._fetch(source, revision)

        # Parse git's native archive command.
        # This should be much faster than manually traversing the trees
        # and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode='r|')
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
        for i, info in enumerate(tar):
            if info.isdir():
                continue
            if match and not match(info.name):
                continue
            if info.issym():
                data = info.linkname
            else:
                data = tar.extractfile(info).read()
            archiver.addfile(os.path.join(prefix, self._path, info.name),
                             info.mode, info.issym(), data)
            total += 1
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'))
        ui.progress(_('archiving (%s)') % relpath, None)
        return total


    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """Return a 7-tuple of file lists (modified, added, removed,
        deleted, unknown, ignored, clean) between the recorded state
        and rev2 (or the working tree when rev2 is empty). Only the
        first three lists are ever populated here."""
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return [], [], [], [], [], [], []
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', rev1, rev2]
        else:
            command = ['diff-index', rev1]
        out = self._gitcommand(command)
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            # status letter sits immediately before the tab separator
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted = unknown = ignored = clean = []
        return modified, added, removed, deleted, unknown, ignored, clean
1532 1532
# Map of subrepo kind (as recorded in .hgsub/.hgsubstate) to the class
# implementing it.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now