##// END OF EJS Templates
diff: use pycompat.{byteskwargs, strkwargs} to switch opts between bytes and str
Pulkit Goyal -
r31631:a7acda2d default
parent child Browse files
Show More
@@ -1,457 +1,459 b''
1 1 # mdiff.py - diff and patch routines for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11 import struct
12 12 import zlib
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 base85,
17 17 bdiff,
18 18 error,
19 19 mpatch,
20 pycompat,
20 21 util,
21 22 )
22 23
def splitnewlines(text):
    '''like str.splitlines, but only split on newlines.'''
    pieces = [piece + '\n' for piece in text.split('\n')]
    if not pieces:
        return pieces
    if pieces[-1] == '\n':
        # the input ended with a newline; drop the spurious empty tail
        pieces.pop()
    else:
        # the input had no trailing newline; strip the one we appended
        pieces[-1] = pieces[-1][:-1]
    return pieces
32 33
class diffopts(object):
    '''context is the number of context lines
    text treats all files as text
    showfunc enables diff -p output
    git enables the git extended patch format
    nodates removes dates from diff headers
    nobinary ignores binary files
    noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
    ignorews ignores all whitespace changes in the diff
    ignorewsamount ignores changes in the amount of whitespace
    ignoreblanklines ignores changes whose lines are all blank
    upgrade generates git diffs to avoid data loss
    '''

    # default value for every supported option; __init__ falls back to
    # these for any option the caller does not pass
    defaults = {
        'context': 3,
        'text': False,
        'showfunc': False,
        'git': False,
        'nodates': False,
        'nobinary': False,
        'noprefix': False,
        'index': 0,
        'ignorews': False,
        'ignorewsamount': False,
        'ignoreblanklines': False,
        'upgrade': False,
        'showsimilarity': False,
    }

    def __init__(self, **opts):
        # keyword arguments arrive with str keys on Python 3; normalize
        # them to bytes so lookups against the keys of `defaults` work
        opts = pycompat.byteskwargs(opts)
        for k in self.defaults.keys():
            v = opts.get(k)
            if v is None:
                v = self.defaults[k]
            setattr(self, k, v)

        try:
            # `context` may arrive as a string; coerce it to an int
            self.context = int(self.context)
        except ValueError:
            raise error.Abort(_('diff context lines count must be '
                                'an integer, not %r') % self.context)

    def copy(self, **kwargs):
        # start from the current option values, then apply the overrides
        opts = dict((k, getattr(self, k)) for k in self.defaults)
        opts.update(kwargs)
        return diffopts(**opts)

# module-level instance carrying the stock defaults
defaultopts = diffopts()
82 84
def wsclean(opts, text, blank=True):
    """Return `text` with whitespace normalized per the diff options.

    With ``opts.ignorews`` all whitespace is stripped; otherwise with
    ``opts.ignorewsamount`` whitespace runs are normalized.  When `blank`
    is true and ``opts.ignoreblanklines`` is set, blank lines are squeezed
    out as well.
    """
    if opts.ignorews:
        cleaned = bdiff.fixws(text, 1)
    elif opts.ignorewsamount:
        cleaned = bdiff.fixws(text, 0)
    else:
        cleaned = text
    if blank and opts.ignoreblanklines:
        cleaned = re.sub('\n+', '\n', cleaned).strip('\n')
    return cleaned
91 93
def splitblock(base1, lines1, base2, lines2, opts):
    """Split a matching-with-blank-lines region into '=' and '~' blocks.

    Yields ``([a1, a2, b1, b2], btype)`` pairs where btype is '=' for a run
    of genuinely matching lines and '~' for a run that is blank (after
    whitespace cleaning) on at least one side.  `base1`/`base2` offset the
    reported ranges into the coordinates of the full files.
    """
    # The input lines matches except for interwoven blank lines. We
    # transform it into a sequence of matching blocks and blank blocks.
    # reduce each line to 1 (non-blank after cleaning) or 0 (blank)
    lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
    lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
    s1, e1 = 0, len(lines1)
    s2, e2 = 0, len(lines2)
    while s1 < e1 or s2 < e2:
        i1, i2, btype = s1, s2, '='
        if (i1 >= e1 or lines1[i1] == 0
            or i2 >= e2 or lines2[i2] == 0):
            # Consume the block of blank lines
            btype = '~'
            while i1 < e1 and lines1[i1] == 0:
                i1 += 1
            while i2 < e2 and lines2[i2] == 0:
                i2 += 1
        else:
            # Consume the matching lines
            while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
                i1 += 1
                i2 += 1
        # report the consumed span in absolute file coordinates
        yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
        s1 = i1
        s2 = i2
117 119
def blocksinrange(blocks, rangeb):
    """filter `blocks` like (a1, a2, b1, b2) from items outside line range
    `rangeb` from ``(b1, b2)`` point of view.

    Return `filteredblocks, rangea` where:

    * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
      `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
      block ``(b1, b2)`` being inside `rangeb` if
      ``rangeb[0] < b2 and b1 < rangeb[1]``;
    * `rangea` is the line range w.r.t. to ``(a1, a2)`` parts of `blocks`.
    """
    lbb, ubb = rangeb
    # lower/upper bounds of the corresponding "a"-side range, filled in
    # as we encounter the blocks containing the rangeb endpoints
    lba, uba = None, None
    filteredblocks = []
    for block in blocks:
        (a1, a2, b1, b2), stype = block
        if lbb >= b1 and ubb <= b2 and stype == '=':
            # rangeb is within a single "=" hunk, restrict back linerange1
            # by offsetting rangeb
            lba = lbb - b1 + a1
            uba = ubb - b1 + a1
        else:
            if b1 <= lbb < b2:
                # the lower bound of rangeb falls inside this block
                if stype == '=':
                    lba = a2 - (b2 - lbb)
                else:
                    lba = a1
            if b1 < ubb <= b2:
                # the upper bound of rangeb falls inside this block
                if stype == '=':
                    uba = a1 + (ubb - b1)
                else:
                    uba = a2
        if lbb < b2 and b1 < ubb:
            # block overlaps rangeb: keep it
            filteredblocks.append(block)
    if lba is None or uba is None or uba < lba:
        raise error.Abort(_('line range exceeds file size'))
    return filteredblocks, (lba, uba)
156 158
def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
    """Return (block, type) tuples, where block is an mdiff.blocks
    line entry. type is '=' for blocks matching exactly one another
    (bdiff blocks), '!' for non-matching blocks and '~' for blocks
    matching only after having filtered blank lines.
    line1 and line2 are text1 and text2 split with splitnewlines() if
    they are already available.
    """
    if opts is None:
        opts = defaultopts
    if opts.ignorews or opts.ignorewsamount:
        # normalize whitespace up front so bdiff compares cleaned text
        text1 = wsclean(opts, text1, False)
        text2 = wsclean(opts, text2, False)
    diff = bdiff.blocks(text1, text2)
    for i, s1 in enumerate(diff):
        # The first match is special.
        # we've either found a match starting at line 0 or a match later
        # in the file. If it starts later, old and new below will both be
        # empty and we'll continue to the next match.
        if i > 0:
            s = diff[i - 1]
        else:
            s = [0, 0, 0, 0]
        # s is the gap between the previous match and this one
        s = [s[1], s1[0], s[3], s1[2]]

        # bdiff sometimes gives huge matches past eof, this check eats them,
        # and deals with the special first match case described above
        if s[0] != s[1] or s[2] != s[3]:
            type = '!'
            if opts.ignoreblanklines:
                # a gap whose cleaned content is identical on both sides
                # differs only by blank lines: report it as '~'
                if lines1 is None:
                    lines1 = splitnewlines(text1)
                if lines2 is None:
                    lines2 = splitnewlines(text2)
                old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
                new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
                if old == new:
                    type = '~'
            yield s, type
        yield s1, '='
197 199
def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
    """Return a unified diff as a (headers, hunks) tuple.

    If the diff is not null, `headers` is a list with unified diff header
    lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
    (hunkrange, hunklines) coming from _unidiff().
    Otherwise, `headers` and `hunks` are empty.

    `ad`/`bd` are the date strings and `fn1`/`fn2` the file names used in
    the header lines.
    """
    def datetag(date, fn=None):
        # git-style diffs and --nodates omit the date; a filename that
        # contains a space still needs the tab separator
        if not opts.git and not opts.nodates:
            return '\t%s' % date
        if fn and ' ' in fn:
            return '\t'
        return ''

    # value returned when there is nothing to diff
    sentinel = [], ()
    if not a and not b:
        return sentinel

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    epoch = util.datestr((0, 0))

    fn1 = util.pconvert(fn1)
    fn2 = util.pconvert(fn2)

    def checknonewline(lines):
        # annotate hunk lines lacking a trailing newline, diff(1)-style
        for text in lines:
            if text[-1] != '\n':
                text += "\n\ No newline at end of file\n"
            yield text

    if not opts.text and (util.binary(a) or util.binary(b)):
        if a and b and len(a) == len(b) and a == b:
            return sentinel
        headerlines = []
        hunks = (None, ['Binary file %s has changed\n' % fn1]),
    elif not a:
        # no content on the left side: one hunk adding every line of b
        b = splitnewlines(b)
        if a is None:
            l1 = '--- /dev/null%s' % datetag(epoch)
        else:
            l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
        headerlines = [l1, l2]
        size = len(b)
        hunkrange = (0, 0, 1, size)
        hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
        hunks = (hunkrange, checknonewline(hunklines)),
    elif not b:
        # no content on the right side: one hunk removing every line of a
        a = splitnewlines(a)
        l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        if b is None:
            l2 = '+++ /dev/null%s' % datetag(epoch)
        else:
            l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
        headerlines = [l1, l2]
        size = len(a)
        hunkrange = (1, size, 0, 0)
        hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
        hunks = (hunkrange, checknonewline(hunklines)),
    else:
        diffhunks = _unidiff(a, b, opts=opts)
        try:
            hunkrange, hunklines = next(diffhunks)
        except StopIteration:
            # no difference at all
            return sentinel

        headerlines = [
            "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
            "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
        ]
        def rewindhunks():
            # re-yield the hunk consumed above, then the remaining ones
            yield hunkrange, checknonewline(hunklines)
            for hr, hl in diffhunks:
                yield hr, checknonewline(hl)

        hunks = rewindhunks()

    return headerlines, hunks
282 284
def _unidiff(t1, t2, opts=defaultopts):
    """Yield hunks of a headerless unified diff from t1 and t2 texts.

    Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
    tuple (s1, l1, s2, l2) representing the range information of the hunk to
    form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
    of the hunk combining said header followed by line additions and
    deletions.
    """
    l1 = splitnewlines(t1)
    l2 = splitnewlines(t2)
    def contextend(l, len):
        # end of the context window, clamped to the file length
        ret = l + opts.context
        if ret > len:
            ret = len
        return ret

    def contextstart(l):
        # start of the context window, clamped to the top of the file
        ret = l - opts.context
        if ret < 0:
            return 0
        return ret

    # [position where the last function-line scan stopped,
    #  last function line found] -- shared state for yieldhunk()
    lastfunc = [0, '']
    def yieldhunk(hunk):
        (astart, a2, bstart, b2, delta) = hunk
        aend = contextend(a2, len(l1))
        alen = aend - astart
        blen = b2 - bstart + aend - a2

        func = ""
        if opts.showfunc:
            lastpos, func = lastfunc
            # walk backwards from the start of the context up to the start of
            # the previous hunk context until we find a line starting with an
            # alphanumeric char.
            for i in xrange(astart - 1, lastpos - 1, -1):
                if l1[i][0].isalnum():
                    func = ' ' + l1[i].rstrip()[:40]
                    lastfunc[1] = func
                    break
            # by recording this hunk's starting point as the next place to
            # start looking for function lines, we avoid reading any line in
            # the file more than once.
            lastfunc[0] = astart

        # zero-length hunk ranges report their start line as one less
        if alen:
            astart += 1
        if blen:
            bstart += 1

        hunkrange = astart, alen, bstart, blen
        hunklines = (
            ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
            + delta
            + [' ' + l1[x] for x in xrange(a2, aend)]
        )
        yield hunkrange, hunklines

    # bdiff.blocks gives us the matching sequences in the files. The loop
    # below finds the spaces between those matching sequences and translates
    # them into diff output.
    #
    hunk = None
    ignoredlines = 0
    for s, stype in allblocks(t1, t2, opts, l1, l2):
        a1, a2, b1, b2 = s
        if stype != '!':
            if stype == '~':
                # The diff context lines are based on t1 content. When
                # blank lines are ignored, the new lines offsets must
                # be adjusted as if equivalent blocks ('~') had the
                # same sizes on both sides.
                ignoredlines += (b2 - b1) - (a2 - a1)
            continue
        delta = []
        old = l1[a1:a2]
        new = l2[b1:b2]

        b1 -= ignoredlines
        b2 -= ignoredlines
        astart = contextstart(a1)
        bstart = contextstart(b1)
        prev = None
        if hunk:
            # join with the previous hunk if it falls inside the context
            if astart < hunk[1] + opts.context + 1:
                prev = hunk
                astart = hunk[1]
                bstart = hunk[3]
            else:
                for x in yieldhunk(hunk):
                    yield x
        if prev:
            # we've joined the previous hunk, record the new ending points.
            hunk[1] = a2
            hunk[3] = b2
            delta = hunk[4]
        else:
            # create a new hunk
            hunk = [astart, a2, bstart, b2, delta]

        delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
        delta[len(delta):] = ['-' + x for x in old]
        delta[len(delta):] = ['+' + x for x in new]

    # flush the trailing hunk, if any
    if hunk:
        for x in yieldhunk(hunk):
            yield x
393 395
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def fmtline(line):
        # encode the chunk length as a single letter (A-Z for 1-26 bytes,
        # a-z above that), then the base85-encoded payload
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # yield successive csize-byte slices of text
        l = len(text)
        i = 0
        while i < l:
            yield text[i:i + csize]
            i += csize

    if to is None:
        to = ''
    if tn is None:
        tn = ''

    if to == tn:
        # identical content: emit nothing
        return ''

    # TODO: deltas
    # emit the new content as a zlib-compressed 'literal' git binary patch
    ret = []
    ret.append('GIT binary patch\n')
    ret.append('literal %s\n' % len(tn))
    for l in chunk(zlib.compress(tn)):
        ret.append(fmtline(l))
    ret.append('\n')

    return ''.join(ret)
428 430
def patchtext(bin):
    """Extract the inserted text fragments from a binary delta."""
    fragments = []
    offset = 0
    total = len(bin)
    while offset < total:
        # each record is a 12-byte (start, end, length) header + payload
        p1, p2, length = struct.unpack(">lll", bin[offset:offset + 12])
        offset += 12
        fragments.append(bin[offset:offset + length])
        offset += length
    return "".join(fragments)
438 440
def patch(a, bin):
    """Apply the binary delta `bin` to the text `a` and return the result."""
    if not len(a):
        # empty source: the delta carries the whole text after a trivial
        # 12-byte header, so just skip past that header
        return util.buffer(bin, 12)
    return mpatch.patches(a, [bin])
444 446
# similar to difflib.SequenceMatcher.get_matching_blocks
def get_matching_blocks(a, b):
    """Return matches between a and b as (a-start, b-start, length) triples."""
    return [(a1, b1, a2 - a1) for a1, a2, b1, b2 in bdiff.blocks(a, b)]
448 450
def trivialdiffheader(length):
    """Return a delta header inserting `length` bytes at offset 0.

    Returns the empty string for a zero length.
    """
    if not length:
        return ''
    return struct.pack(">lll", 0, 0, length)
451 453
def replacediffheader(oldlen, newlen):
    """Return a delta header replacing the first `oldlen` bytes with `newlen` new ones."""
    return struct.pack('>lll', 0, oldlen, newlen)
454 456
# re-exported implementations: delta application, patched-size computation
# and raw binary diffing
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
@@ -1,2673 +1,2673 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import collections
12 12 import copy
13 13 import email
14 14 import errno
15 15 import hashlib
16 16 import os
17 17 import posixpath
18 18 import re
19 19 import shutil
20 20 import tempfile
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 base85,
30 30 copies,
31 31 diffhelpers,
32 32 encoding,
33 33 error,
34 34 mail,
35 35 mdiff,
36 36 pathutil,
37 37 pycompat,
38 38 scmutil,
39 39 similar,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
# convenience alias for util's StringIO implementation
stringio = util.stringio

# extracts source/destination paths from a 'diff --git' header line
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs
tabsplitter = re.compile(br'(\t+|[^\t]+)')

class PatchError(Exception):
    # raised when a patch cannot be parsed or applied
    pass
50 50
51 51
52 52 # public functions
53 53
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # does this line look like a "Key: value" mail header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines in a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split on mbox-style 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # parse as a MIME message and yield the text-like parts
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block begins
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: the whole accumulated input is one patch
        yield chunk(cur)

    class fiter(object):
        # minimal iterator adapter for objects that only expose readline()
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # read lines until we can decide which splitting strategy applies
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
180 180
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key"); extract() uses this to
# map '# <Header> <value>' lines onto keys of its result dictionary
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
187 187
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # the patch body is written to a temp file whose name is returned
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # drop a leading '[PATCH ...]' tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is commit message material
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible '# Header value' lines, see
                            # patchheadermap above
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # a lone '---' ends the message body; skip the rest
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file before propagating the error
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing for the caller to unlink
        os.unlink(tmpname)
    return data
307 307
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # pick the symlink and executable bits out of an octal file mode
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # shallow clone: same path plus all mutable metadata attributes
        other = patchmeta(self.path)
        for attr in ('oldpath', 'mode', 'op', 'binary'):
            setattr(other, attr, getattr(self, attr))
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            # a missing source side matches only a file addition
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            # a missing target side matches only a file deletion
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # both sides of the diff header must agree with this metadata
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
353 353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None           # patchmeta for the file currently being parsed
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new per-file section starts: flush the pending one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # the unified-diff header ends this file's metadata section
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # the mode is the trailing 6-character octal field
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    # flush the final pending patch, if any
    if gp:
        gitpatches.append(gp)

    return gitpatches
397 397
class linereader(object):
    """File-like wrapper that lets callers push lines back onto the stream."""

    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, served (in push order) before reading from fp
        self.buf = []

    def push(self, line):
        # a None line is silently ignored so callers can push blindly
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
417 417
class abstractbackend(object):
    """Base interface for the destination a patch is applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # no-op by default; subclasses may persist the rejects
        pass

    def exists(self, fname):
        # subclasses must report whether fname exists in the target
        raise NotImplementedError
449 449
class fsbackend(abstractbackend):
    """Patch backend applying changes to the filesystem under `basedir`."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def _join(self, f):
        # absolute path of `f` under the backend root
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None) if
        it does not exist."""
        if self.opener.islink(fname):
            # for a symlink, the "data" is its target path
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # persist rejected hunks next to the target as '<fname>.rej'
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
501 501
class workingbackend(fsbackend):
    """Filesystem backend that also keeps the repository's dirstate in
    sync with the applied changes."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # forwarded to scmutil.marktouched() in close()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist but are unknown to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Record copies and removals in the dirstate; return the sorted
        list of files this backend touched."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
545 545
class filestore(object):
    """Hold file contents in memory, spilling to a temporary directory
    once the in-memory payload would exceed `maxsize` bytes (4 MiB by
    default; a negative maxsize disables spilling)."""

    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created vfs over the spill directory
        self.files = {}      # fname -> (tempname, mode, copied), spilled
        self.created = 0     # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied), in memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or self.size + len(data) <= self.maxsize:
            # still fits: keep it in memory
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if self.opener and fname in self.files:
            fn, mode, copied = self.files[fname]
            return self.opener.read(fn), mode, copied
        return None, None, None

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
582 582
class repobackend(abstractbackend):
    """Patch backend applying hunks against a changectx ``ctx``,
    recording the results in ``store`` rather than touching the
    working directory."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Only files present in the base context may be patched.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return ``(data, (islink, isexec))`` for fname, or
        ``(None, None)`` when it is absent from the context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = 'l' in flags
        isexec = 'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Store new content for fname; ``data=None`` means a
        mode-only change, so the current content is kept."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by the patch."""
        return self.removed | self.changed
624 624
# Hunk range descriptors.  Raw strings so that '\d', '\+' and '\*' reach
# the regex engine instead of being (mis)treated as string escapes.
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range line: '--- start[,end] ----' / '*** start[,end] ****'
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line handling modes
eolmodes = ['strict', 'crlf', 'lf', 'auto']
629 629
class patchfile(object):
    """State and logic for applying the hunks of one patch to one file.

    The current content is read from ``backend`` (or from ``store``
    for copies/renames), hunks are applied to the in-memory line list
    by apply(), and the result is written back through the backend by
    close().  ``eolmode`` is one of the module-level ``eolmodes`` and
    controls end-of-line normalization.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None                 # EOL style detected in the input file
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # copies/renames read their source content from the store
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}          # line content -> [line numbers], see findlines
        self.dirty = 0
        self.offset = 0         # cumulative line-count drift from applied hunks
        self.skew = 0           # drift of the last match vs. the expected start
        self.rej = []           # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write ``lines`` through the backend, applying the EOL policy."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print the file name at most once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk ``h`` to the in-memory lines.

        Returns 0 on a clean apply, the fuzz amount on a fuzzy apply,
        or -1 on failure (in which case the hunk is queued in
        ``self.rej``).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no fuzzing
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
846 846
class header(object):
    """A patch header: the lines introducing one file's diff, plus the
    list of its hunks (filled in by the patch parser).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # a git binary patch always carries an 'index ' line
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a short human-oriented summary of this header to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                return
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                return
            if line.startswith('---'):
                nlines = sum([max(h.added, h.removed) for h in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n')
                         % (len(self.hunks), nlines))
                return
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the change can only be taken whole (binary or deletion)
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % ' '.join(map(repr, self.files()))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level, for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation: in the case of
        # a deleted file they can only take the deletion or leave it; they
        # cannot take just part of it.
        # Newly added files are special only while empty; once they have
        # content the user may edit it.
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
918 918
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(lineno, ctxlines):
            # Context trimming is intentionally disabled (the 'False and'
            # guard below), so this is currently an identity transform.
            excess = len(ctxlines) - self.maxcontext
            if False and excess > 0:
                return lineno + excess, ctxlines[:self.maxcontext]
            return lineno, ctxlines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line[0] == '+')
        rem = sum(1 for line in hunk if line[0] == '-')
        return add, rem

    def write(self, fp):
        """Emit this hunk (with its surrounding context) as unified
        diff text."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            # the marker line is not counted in the hunk ranges
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n'
                 % (self.fromline, fromlen, self.toline, tolen,
                    self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
979 979
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Prompts the user about every header and hunk in ``headers`` and
    returns a pair ``(selected, {})`` where ``selected`` is the flat
    list of accepted headers and hunks; a header is kept only when it
    is special() or at least one of its hunks was taken.  ``operation``
    selects the prompt wording ('record' by default).
    """
    if operation is None:
        operation = 'record'
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous 'skip all'/'skip file' answer short-circuits the prompt
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        # skip headers we have already prompted about
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target-side
                    # line numbers; adjust before recording
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # a skipped hunk shifts following hunks' target lines
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One textual hunk parsed from a unified or context diff.

    ``a`` holds the old-side lines (with their '-'/' ' markers), ``b``
    the new-side lines (markers stripped); ``starta``/``lena`` and
    ``startb``/``lenb`` come from the hunk range descriptor.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from linereader ``lr``."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -start +start @@' means 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body, rebuilding ``self.desc`` as
        a unified '@@' descriptor when done."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old ('***') block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # read the new ('---') block, merging it into self.hunk
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance through self.hunk to find where this new-side line
            # belongs, inserting it if it is not already present
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold as many lines as the ranges claim."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz``
        context lines trimmed and starts converted to 0-based."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1364 1364
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None        # decoded payload; set by _read()
        self.delta = False      # True for 'delta' hunks, False for 'literal'
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content; for a delta hunk the delta is
        applied against ``lines``."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a git 'literal'/'delta' base85+zlib block from ``lr``."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan forward to the 'literal <size>' or 'delta <size>' line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1420 1420
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' diff header line.

    Everything after the first tab (or, failing that, the first
    space) is trailing metadata and is dropped.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1430 1430
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    # NOTE: record hunks are reversed *in place* (their hunk lines and
    # added/removed counts are mutated); headers and any other entries
    # are passed through unchanged.
    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
1502 1502
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current line cursors into the old (from) and new (to) file
            self.fromline = 0
            self.toline = 0
            # function/context name from the @@ ... @@ line, if any
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # limits are the five groups of scanpatch's @@-line regex
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context lines terminate any pending hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                # advance the line cursors past the emitted hunk
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the most recent context becomes this hunk's leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk before switching files
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the trailing hunk, if any, then hand back the headers
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; events are produced by scanpatch()
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # handlers live in the class dict as plain functions, so the
            # instance must be passed explicitly
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                               (state, newstate))
        state = newstate
    del fp
    return p.finished()
1585 1585
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for remaining in range(strip, 0, -1):
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow runs of '/' so 'a//b' strips as a single component
        while pos < end - 1 and path[pos] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1623 1623
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch hunk.

    Decides which of the '---'/'+++' names the hunk actually patches,
    and whether the operation is an ADD or DELETE. Raises PatchError
    when both sides are /dev/null.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation/removal hunk has a zero start and length on the null side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer a name that exists in the working copy
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the non-null side of the diff
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1678 1678
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def collect(first, pred):
        """accumulate lines from lr while pred holds, starting with first"""
        gathered = [first]
        for ln in iter(lr.readline, ''):
            if not pred(ln):
                # put back the line that broke the run
                lr.push(ln)
                break
            gathered.append(ln)
        return gathered

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            hdr = collect(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                hdr += [fromfile, lr.readline()]
            else:
                lr.push(fromfile)
            yield 'file', hdr
        elif line[0] == ' ':
            yield 'context', collect(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', collect(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1724 1724
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    try:
        # seekable input: remember where we are and rewind afterwards
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable (e.g. a pipe): buffer everything in memory
        pos = 0
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
1750 1750
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None (format unknown), True (context diff), False (unified)
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                # git metadata for the file currently being patched
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before the hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only patches (renames, mode changes, ...)
            # that precede the file this header is about
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # remaining git patches carry no hunks but still must be announced
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1846 1846
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the delta stream: a source-size header, a
    destination-size header, then copy/insert opcodes. data is the
    source text that copy opcodes read from. Returns the patched text.
    Raises PatchError on opcode 0, which the format reserves.
    """
    def deltahead(binchunk):
        # length of the variable-width size header at the start of
        # binchunk: bytes with the high bit set continue the number
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # collect output fragments in a list and join once at the end,
    # instead of quadratic repeated string concatenation
    out = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip destination size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy opcode: bits 0-3 select offset bytes, bits 4-6 size
            # bytes (little-endian); omitted bytes are zero
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # an encoded size of zero means the maximum copy size
                size = 0x10000
            out.append(data[offset:offset + size])
        elif cmd != 0:
            # insert opcode: cmd itself is the literal byte count
            out.append(binchunk[i:i + cmd])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(out)
1902 1902
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the internal driver with the default patchfile class
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1915 1915
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Driver behind applydiff(): consume iterhunks() events and apply
    # them through 'patcher' (a patchfile-like class) and 'backend'.
    # Returns 0 on clean apply, 1 on fuzz, -1 when rejects were recorded.

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip leading path components and prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the 'file' event for this hunk was rejected; skip it
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # close the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only patch: handle deletes, creations, copies
                # and mode changes without a patchfile
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                           % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # file could not be opened for patching; count a reject
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # pre-stage copy/rename sources so later patches see the
            # original content (see scangitpatch)
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2005 2005
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Parses the external patch program's output to collect touched files
    into 'files' and to detect fuzz. Raises PatchError if the program
    exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # pre-initialize so a 'with fuzz'/'FAILED' line emitted before any
    # 'patching file' line cannot raise UnboundLocalError
    pf = None
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2047 2047
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj (a path or file-like object) through backend.

    Touched files are accumulated into 'files'. Returns True when the
    patch applied with fuzz; raises PatchError when it failed to apply.
    """
    files = set() if files is None else files
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    # patchobj may be a path (open it) or an already-open file object
    fp = patchobj
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        pass
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2074 2074
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2081 2081
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    # like internalpatch, but patches against an arbitrary changectx
    # through a repobackend instead of the working directory
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2086 2086
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # honor a user-configured external patch program, if any
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2108 2108
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath,
                                                   strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    # a rename touches both the old and the new name
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
    return changed
2129 2129
class GitDiffRequired(Exception):
    """Raised during diff generation when data would be lost unless the
    output is upgraded to the git extended diff format (see diffhunks)."""
    pass
2132 2132
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)
2137 2137
# backward-compatibility alias: historical callers expect the
# all-features options factory under the name 'diffopts'
diffopts = diffallopts
2139 2139
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line opts win over config; 'name' maps the opts key to
        # its (possibly different) config entry name
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # runs even for out-of-range/unrecognized values, so the
                # diff falls back on whatever hlen was last assigned
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    # mdiff.diffopts takes native-str keywords; convert for py3
    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2217 2217
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    hunksiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                          changes=changes, opts=opts, losedatafn=losedatafn,
                          prefix=prefix, relroot=relroot, copy=copy)
    for hdr, hunks in hunksiter:
        # flatten the per-hunk line lists into one text chunk
        pieces = []
        for hrange, hlines in hunks:
            pieces.extend(hlines)
        text = ''.join(pieces)
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2251 2251
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small (20-entry) LRU cache of filelogs keyed by file name, to
        # avoid reopening the same filelog for repeated lookups
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as
            # removed. They are not in ctx1, so we don't want to show them
            # in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            # (items() returns a list here, so deleting while iterating is ok)
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2364 2364
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefixes recognized in header lines vs. hunk text lines
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    # first hunk line ends the header section
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # tabs inside changed lines get their own label
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                # the stripped trailing whitespace, with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2418 2418
def diffui(*args, **kw):
    '''wrap diff() so its output is labelled for ui.write()

    Yields the same 2-tuples of (output, label) that difflabel() produces.
    '''
    return difflabel(diff, *args, **kw)
2422 2422
2423 2423 def _filepairs(modified, added, removed, copy, opts):
2424 2424 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2425 2425 before and f2 is the the name after. For added files, f1 will be None,
2426 2426 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2427 2427 or 'rename' (the latter two only if opts.git is set).'''
2428 2428 gone = set()
2429 2429
2430 2430 copyto = dict([(v, k) for k, v in copy.items()])
2431 2431
2432 2432 addedset, removedset = set(added), set(removed)
2433 2433
2434 2434 for f in sorted(modified + added + removed):
2435 2435 copyop = None
2436 2436 f1, f2 = f, f
2437 2437 if f in addedset:
2438 2438 f1 = None
2439 2439 if f in copy:
2440 2440 if opts.git:
2441 2441 f1 = copy[f]
2442 2442 if f1 in removedset and f1 not in gone:
2443 2443 copyop = 'rename'
2444 2444 gone.add(f1)
2445 2445 else:
2446 2446 copyop = 'copy'
2447 2447 elif f in removedset:
2448 2448 f2 = None
2449 2449 if opts.git:
2450 2450 # have we already reported a copy above?
2451 2451 if (f in copyto and copyto[f] in addedset
2452 2452 and copy[copyto[f]] == f):
2453 2453 continue
2454 2454 yield f1, f2, copyop
2455 2455
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob id the way git computes it: sha1 of 'blob <len>\0' + content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # wrap the dict views in list() so the concatenation also works on
        # Python 3, where keys()/values() are views, not lists
        for f in (modified + added + removed
                  + list(copy.keys()) + list(copy.values())):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        # a plain (non-git) diff cannot represent these cases; give the
        # caller a chance to upgrade to a git diff or warn
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            # binary files are emitted as a base85 (git) delta
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2576 2576
def diffstatsum(stats):
    '''fold per-file diffstat tuples into file-wide totals

    stats is an iterable of (filename, added, removed, isbinary) tuples;
    returns (maxname, maxtotal, addtotal, removetotal, hasbinary).
    '''
    maxname = maxtotal = addtotal = removetotal = 0
    hasbinary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxname:
            maxname = namewidth
        if adds + removes > maxtotal:
            maxtotal = adds + removes
        addtotal += adds
        removetotal += removes
        hasbinary = hasbinary or isbinary

    return maxname, maxtotal, addtotal, removetotal, hasbinary
2587 2587
def diffstatdata(lines):
    '''parse diff text into per-file statistics

    Returns a list of (filename, adds, removes, isbinary) tuples, one
    per file appearing in lines (an iterable of diff output lines).
    '''
    # raw string: a bare \s in a plain literal is an invalid escape
    # sequence on modern Pythons
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2617 2617
def diffstat(lines, width=80):
    '''render a diffstat histogram for the given diff lines

    Returns a single string; width is the total terminal width available
    for the output.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must fit the largest total, and the literal 'Bin'
    # marker when binary files are present
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    # never squeeze the histogram below 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        graph = '+' * scale(adds) + '-' * scale(removes)
        output.append(' %s%s | %*s %s\n'
                      % (filename, padding, countwidth, count, graph))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2655 2655
def diffstatui(*args, **kw):
    '''labelled variant of diffstat()

    Yields 2-tuples of (output, label) suitable for ui.write().
    '''
    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # summary line or a 'Bin' entry: no highlighting
            yield (line, '')
        else:
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(r'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(r'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now