##// END OF EJS Templates
patch: add config knob for displaying the index header...
Sean Farley -
r30788:d1901c4c default
parent child Browse files
Show More
@@ -1,422 +1,423
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 base85,
16 base85,
17 bdiff,
17 bdiff,
18 error,
18 error,
19 mpatch,
19 mpatch,
20 util,
20 util,
21 )
21 )
22
22
def splitnewlines(text):
    """Break *text* into lines, keeping the trailing '\\n' on each line.

    Unlike str.splitlines, only '\\n' counts as a separator, and a final
    line lacking a terminating newline is preserved without one.
    """
    pieces = [chunk + '\n' for chunk in text.split('\n')]
    if not pieces:
        return pieces
    if pieces[-1] == '\n':
        # text ended with '\n': drop the spurious empty trailing entry
        pieces.pop()
    else:
        # text had no final newline: strip the one we appended above
        pieces[-1] = pieces[-1][:-1]
    return pieces
32
32
class diffopts(object):
    """Bag of diff-generation options.

    context          number of context lines
    text             treat all files as text
    showfunc         enable diff -p style function headers
    git              enable the git extended patch format
    nodates          omit dates from diff headers
    nobinary         ignore binary files
    noprefix         disable the 'a/' and 'b/' prefixes (ignored in plain mode)
    index            index header digits to display (0 disables)
    ignorews         ignore all whitespace changes in the diff
    ignorewsamount   ignore changes in the amount of whitespace
    ignoreblanklines ignore changes whose lines are all blank
    upgrade          generate git diffs to avoid data loss
    """

    defaults = {
        'context': 3,
        'text': False,
        'showfunc': False,
        'git': False,
        'nodates': False,
        'nobinary': False,
        'noprefix': False,
        'index': 0,
        'ignorews': False,
        'ignorewsamount': False,
        'ignoreblanklines': False,
        'upgrade': False,
    }

    def __init__(self, **opts):
        # Any option not supplied (or supplied as None) falls back to the
        # class-level default.
        for name in self.defaults.keys():
            value = opts.get(name)
            if value is None:
                value = self.defaults[name]
            setattr(self, name, value)

        try:
            self.context = int(self.context)
        except ValueError:
            raise error.Abort(_('diff context lines count must be '
                                'an integer, not %r') % self.context)

    def copy(self, **kwargs):
        # Clone the current options, overriding with any keyword arguments.
        opts = dict((k, getattr(self, k)) for k in self.defaults)
        opts.update(kwargs)
        return diffopts(**opts)
78
79
# Shared instance carrying only the default option values; used wherever a
# caller does not pass its own diffopts.
defaultopts = diffopts()
80
81
def wsclean(opts, text, blank=True):
    """Normalize whitespace in *text* according to the diff options.

    With opts.ignorews all whitespace is stripped; otherwise with
    opts.ignorewsamount whitespace runs are normalized.  When *blank* is
    true and opts.ignoreblanklines is set, blank lines are squeezed out
    as well.
    """
    if opts.ignorews:
        text = bdiff.fixws(text, 1)
    elif opts.ignorewsamount:
        text = bdiff.fixws(text, 0)
    if blank and opts.ignoreblanklines:
        # collapse runs of newlines and drop leading/trailing blank lines
        text = re.sub('\n+', '\n', text).strip('\n')
    return text
89
90
def splitblock(base1, lines1, base2, lines2, opts):
    """Yield ([a1, a2, b1, b2], btype) sub-blocks for two line ranges that
    match except for interwoven blank lines.

    The ranges are re-cut into alternating matching blocks ('=') and
    blank-only blocks ('~').
    """
    # Reduce each line to 1 (significant) or 0 (blank after cleanup).
    flags1 = [1 if wsclean(opts, line) else 0 for line in lines1]
    flags2 = [1 if wsclean(opts, line) else 0 for line in lines2]
    s1, e1 = 0, len(flags1)
    s2, e2 = 0, len(flags2)
    while s1 < e1 or s2 < e2:
        i1, i2, btype = s1, s2, '='
        if (i1 >= e1 or flags1[i1] == 0
            or i2 >= e2 or flags2[i2] == 0):
            # Consume a run of blank lines on either side.
            btype = '~'
            while i1 < e1 and flags1[i1] == 0:
                i1 += 1
            while i2 < e2 and flags2[i2] == 0:
                i2 += 1
        else:
            # Consume a run of matching significant lines in lockstep.
            while i1 < e1 and flags1[i1] == 1 and flags2[i2] == 1:
                i1 += 1
                i2 += 1
        yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
        s1 = i1
        s2 = i2
115
116
def blocksinrange(blocks, rangeb):
    """filter `blocks` like (a1, a2, b1, b2) from items outside line range
    `rangeb` from ``(b1, b2)`` point of view.

    Return `filteredblocks, rangea` where:

    * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
      `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
      block ``(b1, b2)`` being inside `rangeb` if
      ``rangeb[0] < b2 and b1 < rangeb[1]``;
    * `rangea` is the line range w.r.t. to ``(a1, a2)`` parts of `blocks`.
    """
    lbb, ubb = rangeb
    lba = uba = None
    filteredblocks = []
    for block in blocks:
        (a1, a2, b1, b2), stype = block
        if lbb >= b1 and ubb <= b2 and stype == '=':
            # rangeb lies entirely inside a single matching hunk: map it
            # back to the "a" side by simple offsetting.
            lba = lbb - b1 + a1
            uba = ubb - b1 + a1
        else:
            if b1 <= lbb < b2:
                # the lower bound falls inside this block
                lba = a2 - (b2 - lbb) if stype == '=' else a1
            if b1 < ubb <= b2:
                # the upper bound falls inside this block
                uba = a1 + (ubb - b1) if stype == '=' else a2
        if lbb < b2 and b1 < ubb:
            filteredblocks.append(block)
    if lba is None or uba is None or uba < lba:
        raise error.Abort(_('line range exceeds file size'))
    return filteredblocks, (lba, uba)
154
155
def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
    """Return (block, type) tuples, where block is an mdiff.blocks
    line entry. type is '=' for blocks matching exactly one another
    (bdiff blocks), '!' for non-matching blocks and '~' for blocks
    matching only after having filtered blank lines.
    line1 and line2 are text1 and text2 split with splitnewlines() if
    they are already available.
    """
    if opts is None:
        opts = defaultopts
    if opts.ignorews or opts.ignorewsamount:
        # normalize whitespace before diffing (blank=False: blank-line
        # handling is done per-gap below)
        text1 = wsclean(opts, text1, False)
        text2 = wsclean(opts, text2, False)
    diff = bdiff.blocks(text1, text2)
    for i, s1 in enumerate(diff):
        # The first match is special.
        # we've either found a match starting at line 0 or a match later
        # in the file. If it starts later, old and new below will both be
        # empty and we'll continue to the next match.
        if i > 0:
            s = diff[i - 1]
        else:
            s = [0, 0, 0, 0]
        # s spans the gap between the previous match and this one
        s = [s[1], s1[0], s[3], s1[2]]

        # bdiff sometimes gives huge matches past eof, this check eats them,
        # and deals with the special first match case described above
        if s[0] != s[1] or s[2] != s[3]:
            type = '!'
            if opts.ignoreblanklines:
                # split lazily: only needed when blank lines are ignored
                if lines1 is None:
                    lines1 = splitnewlines(text1)
                if lines2 is None:
                    lines2 = splitnewlines(text2)
                old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
                new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
                if old == new:
                    # both sides collapse to the same text, so only blank
                    # lines differ in this gap
                    type = '~'
            yield s, type
        yield s1, '='
195
196
def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
    """Return a unified diff of texts *a* and *b* as one string.

    ad/bd are the dates shown for each side, fn1/fn2 the file names.
    Returns "" when there is nothing to show.
    """
    def datetag(date, fn=None):
        # dates are suppressed in git mode and with nodates; a lone tab is
        # emitted when the file name contains a space so parsers can find
        # the end of the name
        if not opts.git and not opts.nodates:
            return '\t%s\n' % date
        if fn and ' ' in fn:
            return '\t\n'
        return '\n'

    if not a and not b:
        return ""

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    epoch = util.datestr((0, 0))

    fn1 = util.pconvert(fn1)
    fn2 = util.pconvert(fn2)

    if not opts.text and (util.binary(a) or util.binary(b)):
        if a and b and len(a) == len(b) and a == b:
            return ""
        l = ['Binary file %s has changed\n' % fn1]
    elif not a:
        # file was added; a is None for a creation, "" for an empty file
        b = splitnewlines(b)
        if a is None:
            l1 = '--- /dev/null%s' % datetag(epoch)
        else:
            l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
        l3 = "@@ -0,0 +1,%d @@\n" % len(b)
        l = [l1, l2, l3] + ["+" + e for e in b]
    elif not b:
        # file was removed; b is None for a deletion, "" for emptying
        a = splitnewlines(a)
        l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        if b is None:
            l2 = '+++ /dev/null%s' % datetag(epoch)
        else:
            l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
        l3 = "@@ -1,%d +0,0 @@\n" % len(a)
        l = [l1, l2, l3] + ["-" + e for e in a]
    else:
        # ordinary modification: delegate hunk generation to _unidiff
        al = splitnewlines(a)
        bl = splitnewlines(b)
        l = list(_unidiff(a, b, al, bl, opts=opts))
        if not l:
            return ""

        l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)))
        l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)))

    # mark lines missing a final newline, GNU-diff style
    for ln in xrange(len(l)):
        if l[ln][-1] != '\n':
            l[ln] += "\n\ No newline at end of file\n"

    return "".join(l)
255
256
# creates a headerless unified diff
# t1 and t2 are the text to be diffed
# l1 and l2 are the text broken up into lines
def _unidiff(t1, t2, l1, l2, opts=defaultopts):
    """Yield the hunk lines (no ---/+++ header) of the unified diff."""
    def contextend(l, len):
        # clamp the end of the context window to the file length
        ret = l + opts.context
        if ret > len:
            ret = len
        return ret

    def contextstart(l):
        # clamp the start of the context window to the top of the file
        ret = l - opts.context
        if ret < 0:
            return 0
        return ret

    # (position last searched, last function line found) shared across hunks
    lastfunc = [0, '']
    def yieldhunk(hunk):
        (astart, a2, bstart, b2, delta) = hunk
        aend = contextend(a2, len(l1))
        alen = aend - astart
        blen = b2 - bstart + aend - a2

        func = ""
        if opts.showfunc:
            lastpos, func = lastfunc
            # walk backwards from the start of the context up to the start of
            # the previous hunk context until we find a line starting with an
            # alphanumeric char.
            for i in xrange(astart - 1, lastpos - 1, -1):
                if l1[i][0].isalnum():
                    func = ' ' + l1[i].rstrip()[:40]
                    lastfunc[1] = func
                    break
            # by recording this hunk's starting point as the next place to
            # start looking for function lines, we avoid reading any line in
            # the file more than once.
            lastfunc[0] = astart

        # zero-length hunk ranges report their start line as one less
        if alen:
            astart += 1
        if blen:
            bstart += 1

        yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
                                           bstart, blen, func)
        for x in delta:
            yield x
        # trailing context after the changed lines
        for x in xrange(a2, aend):
            yield ' ' + l1[x]

    # bdiff.blocks gives us the matching sequences in the files. The loop
    # below finds the spaces between those matching sequences and translates
    # them into diff output.
    #
    hunk = None
    ignoredlines = 0
    for s, stype in allblocks(t1, t2, opts, l1, l2):
        a1, a2, b1, b2 = s
        if stype != '!':
            if stype == '~':
                # The diff context lines are based on t1 content. When
                # blank lines are ignored, the new lines offsets must
                # be adjusted as if equivalent blocks ('~') had the
                # same sizes on both sides.
                ignoredlines += (b2 - b1) - (a2 - a1)
            continue
        delta = []
        old = l1[a1:a2]
        new = l2[b1:b2]

        b1 -= ignoredlines
        b2 -= ignoredlines
        astart = contextstart(a1)
        bstart = contextstart(b1)
        prev = None
        if hunk:
            # join with the previous hunk if it falls inside the context
            if astart < hunk[1] + opts.context + 1:
                prev = hunk
                astart = hunk[1]
                bstart = hunk[3]
            else:
                # too far apart: flush the previous hunk first
                for x in yieldhunk(hunk):
                    yield x
        if prev:
            # we've joined the previous hunk, record the new ending points.
            hunk[1] = a2
            hunk[3] = b2
            delta = hunk[4]
        else:
            # create a new hunk
            hunk = [astart, a2, bstart, b2, delta]

        # leading context, then removed lines, then added lines
        delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
        delta[len(delta):] = ['-' + x for x in old]
        delta[len(delta):] = ['+' + x for x in new]

    if hunk:
        # flush the final pending hunk
        for x in yieldhunk(hunk):
            yield x
358
359
def b85diff(to, tn):
    """Render the change from *to* to *tn* as a base85 'GIT binary patch'."""
    def fmtline(line):
        # git encodes the chunk length as one letter: A-Z for 1-26 bytes,
        # a-z for 27-52 bytes
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # slice text into csize-byte pieces
        i, total = 0, len(text)
        while i < total:
            yield text[i:i + csize]
            i += csize

    if to is None:
        to = ''
    if tn is None:
        tn = ''

    if to == tn:
        return ''

    # TODO: deltas
    ret = ['GIT binary patch\n',
           'literal %s\n' % len(tn)]
    for piece in chunk(zlib.compress(tn)):
        ret.append(fmtline(piece))
    ret.append('\n')

    return ''.join(ret)
393
394
def patchtext(bin):
    """Return the concatenated replacement fragments of a binary delta.

    *bin* is a sequence of records, each a 12-byte header of three
    big-endian 32-bit integers (start, end, fragment length) followed by
    that many bytes of replacement text.
    """
    pos = 0
    t = []
    while pos < len(bin):
        p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
        pos += 12
        t.append(bin[pos:pos + l])
        pos += l
    return "".join(t)
403
404
def patch(a, bin):
    """Apply binary delta *bin* to text *a* and return the patched text."""
    if len(a) == 0:
        # skip over trivial delta header
        return util.buffer(bin, 12)
    return mpatch.patches(a, [bin])
409
410
# similar to difflib.SequenceMatcher.get_matching_blocks
def get_matching_blocks(a, b):
    # convert bdiff's (a1, a2, b1, b2) spans into (a1, b1, length) triples
    return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
413
414
def trivialdiffheader(length):
    """Return the delta header for a full replacement of *length* bytes.

    A zero length yields the empty string instead of a header.
    """
    if not length:
        return ''
    return struct.pack(">lll", 0, 0, length)
416
417
def replacediffheader(oldlen, newlen):
    """Return a delta header replacing *oldlen* bytes with *newlen* bytes."""
    header = struct.pack(">lll", 0, oldlen, newlen)
    return header
419
420
# Re-export the C implementations under their historical module-level names.
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
@@ -1,2608 +1,2633
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 scmutil,
37 scmutil,
38 util,
38 util,
39 )
39 )
# shortcut to the string-buffer factory used throughout this module
stringio = util.stringio

# matches a git-style diff header line, capturing both file names
gitre = re.compile('diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs
tabsplitter = re.compile(r'(\t+|[^\t]+)')
44
44
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
47
47
48
48
49 # public functions
49 # public functions
50
50
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # does this line look like an RFC 822-style "Key: value" header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines in a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers appearing after a blank line
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' lines, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            # serialize a message object back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        # slurp the rest of the stream and hand it to the email parser
        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split at each transition from non-header lines into header lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no structure recognized: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # minimal line iterator over an object that only has readline()
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the beginning of the stream to pick a splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
177
177
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
184
184
185 def extract(ui, fileobj):
185 def extract(ui, fileobj):
186 '''extract patch from data read from fileobj.
186 '''extract patch from data read from fileobj.
187
187
188 patch can be a normal patch or contained in an email message.
188 patch can be a normal patch or contained in an email message.
189
189
190 return a dictionary. Standard keys are:
190 return a dictionary. Standard keys are:
191 - filename,
191 - filename,
192 - message,
192 - message,
193 - user,
193 - user,
194 - date,
194 - date,
195 - branch,
195 - branch,
196 - node,
196 - node,
197 - p1,
197 - p1,
198 - p2.
198 - p2.
199 Any item can be missing from the dictionary. If filename is missing,
199 Any item can be missing from the dictionary. If filename is missing,
200 fileobj did not contain a patch. Caller must unlink filename when done.'''
200 fileobj did not contain a patch. Caller must unlink filename when done.'''
201
201
202 # attempt to detect the start of a patch
202 # attempt to detect the start of a patch
203 # (this heuristic is borrowed from quilt)
203 # (this heuristic is borrowed from quilt)
204 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
204 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
205 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
205 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
206 r'---[ \t].*?^\+\+\+[ \t]|'
206 r'---[ \t].*?^\+\+\+[ \t]|'
207 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
207 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
208
208
209 data = {}
209 data = {}
210 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
210 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
211 tmpfp = os.fdopen(fd, 'w')
211 tmpfp = os.fdopen(fd, 'w')
212 try:
212 try:
213 msg = email.Parser.Parser().parse(fileobj)
213 msg = email.Parser.Parser().parse(fileobj)
214
214
215 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
215 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
216 data['user'] = msg['From'] and mail.headdecode(msg['From'])
216 data['user'] = msg['From'] and mail.headdecode(msg['From'])
217 if not subject and not data['user']:
217 if not subject and not data['user']:
218 # Not an email, restore parsed headers if any
218 # Not an email, restore parsed headers if any
219 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
219 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
220
220
221 # should try to parse msg['Date']
221 # should try to parse msg['Date']
222 parents = []
222 parents = []
223
223
224 if subject:
224 if subject:
225 if subject.startswith('[PATCH'):
225 if subject.startswith('[PATCH'):
226 pend = subject.find(']')
226 pend = subject.find(']')
227 if pend >= 0:
227 if pend >= 0:
228 subject = subject[pend + 1:].lstrip()
228 subject = subject[pend + 1:].lstrip()
229 subject = re.sub(r'\n[ \t]+', ' ', subject)
229 subject = re.sub(r'\n[ \t]+', ' ', subject)
230 ui.debug('Subject: %s\n' % subject)
230 ui.debug('Subject: %s\n' % subject)
231 if data['user']:
231 if data['user']:
232 ui.debug('From: %s\n' % data['user'])
232 ui.debug('From: %s\n' % data['user'])
233 diffs_seen = 0
233 diffs_seen = 0
234 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
234 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
235 message = ''
235 message = ''
236 for part in msg.walk():
236 for part in msg.walk():
237 content_type = part.get_content_type()
237 content_type = part.get_content_type()
238 ui.debug('Content-Type: %s\n' % content_type)
238 ui.debug('Content-Type: %s\n' % content_type)
239 if content_type not in ok_types:
239 if content_type not in ok_types:
240 continue
240 continue
241 payload = part.get_payload(decode=True)
241 payload = part.get_payload(decode=True)
242 m = diffre.search(payload)
242 m = diffre.search(payload)
243 if m:
243 if m:
244 hgpatch = False
244 hgpatch = False
245 hgpatchheader = False
245 hgpatchheader = False
246 ignoretext = False
246 ignoretext = False
247
247
248 ui.debug('found patch at byte %d\n' % m.start(0))
248 ui.debug('found patch at byte %d\n' % m.start(0))
249 diffs_seen += 1
249 diffs_seen += 1
250 cfp = stringio()
250 cfp = stringio()
251 for line in payload[:m.start(0)].splitlines():
251 for line in payload[:m.start(0)].splitlines():
252 if line.startswith('# HG changeset patch') and not hgpatch:
252 if line.startswith('# HG changeset patch') and not hgpatch:
253 ui.debug('patch generated by hg export\n')
253 ui.debug('patch generated by hg export\n')
254 hgpatch = True
254 hgpatch = True
255 hgpatchheader = True
255 hgpatchheader = True
256 # drop earlier commit message content
256 # drop earlier commit message content
257 cfp.seek(0)
257 cfp.seek(0)
258 cfp.truncate()
258 cfp.truncate()
259 subject = None
259 subject = None
260 elif hgpatchheader:
260 elif hgpatchheader:
261 if line.startswith('# User '):
261 if line.startswith('# User '):
262 data['user'] = line[7:]
262 data['user'] = line[7:]
263 ui.debug('From: %s\n' % data['user'])
263 ui.debug('From: %s\n' % data['user'])
264 elif line.startswith("# Parent "):
264 elif line.startswith("# Parent "):
265 parents.append(line[9:].lstrip())
265 parents.append(line[9:].lstrip())
266 elif line.startswith("# "):
266 elif line.startswith("# "):
267 for header, key in patchheadermap:
267 for header, key in patchheadermap:
268 prefix = '# %s ' % header
268 prefix = '# %s ' % header
269 if line.startswith(prefix):
269 if line.startswith(prefix):
270 data[key] = line[len(prefix):]
270 data[key] = line[len(prefix):]
271 else:
271 else:
272 hgpatchheader = False
272 hgpatchheader = False
273 elif line == '---':
273 elif line == '---':
274 ignoretext = True
274 ignoretext = True
275 if not hgpatchheader and not ignoretext:
275 if not hgpatchheader and not ignoretext:
276 cfp.write(line)
276 cfp.write(line)
277 cfp.write('\n')
277 cfp.write('\n')
278 message = cfp.getvalue()
278 message = cfp.getvalue()
279 if tmpfp:
279 if tmpfp:
280 tmpfp.write(payload)
280 tmpfp.write(payload)
281 if not payload.endswith('\n'):
281 if not payload.endswith('\n'):
282 tmpfp.write('\n')
282 tmpfp.write('\n')
283 elif not diffs_seen and message and content_type == 'text/plain':
283 elif not diffs_seen and message and content_type == 'text/plain':
284 message += '\n' + payload
284 message += '\n' + payload
285 except: # re-raises
285 except: # re-raises
286 tmpfp.close()
286 tmpfp.close()
287 os.unlink(tmpname)
287 os.unlink(tmpname)
288 raise
288 raise
289
289
290 if subject and not message.startswith(subject):
290 if subject and not message.startswith(subject):
291 message = '%s\n%s' % (subject, message)
291 message = '%s\n%s' % (subject, message)
292 data['message'] = message
292 data['message'] = message
293 tmpfp.close()
293 tmpfp.close()
294 if parents:
294 if parents:
295 data['p1'] = parents.pop(0)
295 data['p1'] = parents.pop(0)
296 if parents:
296 if parents:
297 data['p2'] = parents.pop(0)
297 data['p2'] = parents.pop(0)
298
298
299 if diffs_seen:
299 if diffs_seen:
300 data['filename'] = tmpname
300 data['filename'] = tmpname
301 else:
301 else:
302 os.unlink(tmpname)
302 os.unlink(tmpname)
303 return data
303 return data
304
304
305 class patchmeta(object):
305 class patchmeta(object):
306 """Patched file metadata
306 """Patched file metadata
307
307
308 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
308 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
309 or COPY. 'path' is patched file path. 'oldpath' is set to the
309 or COPY. 'path' is patched file path. 'oldpath' is set to the
310 origin file when 'op' is either COPY or RENAME, None otherwise. If
310 origin file when 'op' is either COPY or RENAME, None otherwise. If
311 file mode is changed, 'mode' is a tuple (islink, isexec) where
311 file mode is changed, 'mode' is a tuple (islink, isexec) where
312 'islink' is True if the file is a symlink and 'isexec' is True if
312 'islink' is True if the file is a symlink and 'isexec' is True if
313 the file is executable. Otherwise, 'mode' is None.
313 the file is executable. Otherwise, 'mode' is None.
314 """
314 """
315 def __init__(self, path):
315 def __init__(self, path):
316 self.path = path
316 self.path = path
317 self.oldpath = None
317 self.oldpath = None
318 self.mode = None
318 self.mode = None
319 self.op = 'MODIFY'
319 self.op = 'MODIFY'
320 self.binary = False
320 self.binary = False
321
321
322 def setmode(self, mode):
322 def setmode(self, mode):
323 islink = mode & 0o20000
323 islink = mode & 0o20000
324 isexec = mode & 0o100
324 isexec = mode & 0o100
325 self.mode = (islink, isexec)
325 self.mode = (islink, isexec)
326
326
327 def copy(self):
327 def copy(self):
328 other = patchmeta(self.path)
328 other = patchmeta(self.path)
329 other.oldpath = self.oldpath
329 other.oldpath = self.oldpath
330 other.mode = self.mode
330 other.mode = self.mode
331 other.op = self.op
331 other.op = self.op
332 other.binary = self.binary
332 other.binary = self.binary
333 return other
333 return other
334
334
335 def _ispatchinga(self, afile):
335 def _ispatchinga(self, afile):
336 if afile == '/dev/null':
336 if afile == '/dev/null':
337 return self.op == 'ADD'
337 return self.op == 'ADD'
338 return afile == 'a/' + (self.oldpath or self.path)
338 return afile == 'a/' + (self.oldpath or self.path)
339
339
340 def _ispatchingb(self, bfile):
340 def _ispatchingb(self, bfile):
341 if bfile == '/dev/null':
341 if bfile == '/dev/null':
342 return self.op == 'DELETE'
342 return self.op == 'DELETE'
343 return bfile == 'b/' + self.path
343 return bfile == 'b/' + self.path
344
344
345 def ispatching(self, afile, bfile):
345 def ispatching(self, afile, bfile):
346 return self._ispatchinga(afile) and self._ispatchingb(bfile)
346 return self._ispatchinga(afile) and self._ispatchingb(bfile)
347
347
348 def __repr__(self):
348 def __repr__(self):
349 return "<patchmeta %s %r>" % (self.op, self.path)
349 return "<patchmeta %s %r>" % (self.op, self.path)
350
350
351 def readgitpatch(lr):
351 def readgitpatch(lr):
352 """extract git-style metadata about patches from <patchname>"""
352 """extract git-style metadata about patches from <patchname>"""
353
353
354 # Filter patch for git information
354 # Filter patch for git information
355 gp = None
355 gp = None
356 gitpatches = []
356 gitpatches = []
357 for line in lr:
357 for line in lr:
358 line = line.rstrip(' \r\n')
358 line = line.rstrip(' \r\n')
359 if line.startswith('diff --git a/'):
359 if line.startswith('diff --git a/'):
360 m = gitre.match(line)
360 m = gitre.match(line)
361 if m:
361 if m:
362 if gp:
362 if gp:
363 gitpatches.append(gp)
363 gitpatches.append(gp)
364 dst = m.group(2)
364 dst = m.group(2)
365 gp = patchmeta(dst)
365 gp = patchmeta(dst)
366 elif gp:
366 elif gp:
367 if line.startswith('--- '):
367 if line.startswith('--- '):
368 gitpatches.append(gp)
368 gitpatches.append(gp)
369 gp = None
369 gp = None
370 continue
370 continue
371 if line.startswith('rename from '):
371 if line.startswith('rename from '):
372 gp.op = 'RENAME'
372 gp.op = 'RENAME'
373 gp.oldpath = line[12:]
373 gp.oldpath = line[12:]
374 elif line.startswith('rename to '):
374 elif line.startswith('rename to '):
375 gp.path = line[10:]
375 gp.path = line[10:]
376 elif line.startswith('copy from '):
376 elif line.startswith('copy from '):
377 gp.op = 'COPY'
377 gp.op = 'COPY'
378 gp.oldpath = line[10:]
378 gp.oldpath = line[10:]
379 elif line.startswith('copy to '):
379 elif line.startswith('copy to '):
380 gp.path = line[8:]
380 gp.path = line[8:]
381 elif line.startswith('deleted file'):
381 elif line.startswith('deleted file'):
382 gp.op = 'DELETE'
382 gp.op = 'DELETE'
383 elif line.startswith('new file mode '):
383 elif line.startswith('new file mode '):
384 gp.op = 'ADD'
384 gp.op = 'ADD'
385 gp.setmode(int(line[-6:], 8))
385 gp.setmode(int(line[-6:], 8))
386 elif line.startswith('new mode '):
386 elif line.startswith('new mode '):
387 gp.setmode(int(line[-6:], 8))
387 gp.setmode(int(line[-6:], 8))
388 elif line.startswith('GIT binary patch'):
388 elif line.startswith('GIT binary patch'):
389 gp.binary = True
389 gp.binary = True
390 if gp:
390 if gp:
391 gitpatches.append(gp)
391 gitpatches.append(gp)
392
392
393 return gitpatches
393 return gitpatches
394
394
395 class linereader(object):
395 class linereader(object):
396 # simple class to allow pushing lines back into the input stream
396 # simple class to allow pushing lines back into the input stream
397 def __init__(self, fp):
397 def __init__(self, fp):
398 self.fp = fp
398 self.fp = fp
399 self.buf = []
399 self.buf = []
400
400
401 def push(self, line):
401 def push(self, line):
402 if line is not None:
402 if line is not None:
403 self.buf.append(line)
403 self.buf.append(line)
404
404
405 def readline(self):
405 def readline(self):
406 if self.buf:
406 if self.buf:
407 l = self.buf[0]
407 l = self.buf[0]
408 del self.buf[0]
408 del self.buf[0]
409 return l
409 return l
410 return self.fp.readline()
410 return self.fp.readline()
411
411
412 def __iter__(self):
412 def __iter__(self):
413 return iter(self.readline, '')
413 return iter(self.readline, '')
414
414
415 class abstractbackend(object):
415 class abstractbackend(object):
416 def __init__(self, ui):
416 def __init__(self, ui):
417 self.ui = ui
417 self.ui = ui
418
418
419 def getfile(self, fname):
419 def getfile(self, fname):
420 """Return target file data and flags as a (data, (islink,
420 """Return target file data and flags as a (data, (islink,
421 isexec)) tuple. Data is None if file is missing/deleted.
421 isexec)) tuple. Data is None if file is missing/deleted.
422 """
422 """
423 raise NotImplementedError
423 raise NotImplementedError
424
424
425 def setfile(self, fname, data, mode, copysource):
425 def setfile(self, fname, data, mode, copysource):
426 """Write data to target file fname and set its mode. mode is a
426 """Write data to target file fname and set its mode. mode is a
427 (islink, isexec) tuple. If data is None, the file content should
427 (islink, isexec) tuple. If data is None, the file content should
428 be left unchanged. If the file is modified after being copied,
428 be left unchanged. If the file is modified after being copied,
429 copysource is set to the original file name.
429 copysource is set to the original file name.
430 """
430 """
431 raise NotImplementedError
431 raise NotImplementedError
432
432
433 def unlink(self, fname):
433 def unlink(self, fname):
434 """Unlink target file."""
434 """Unlink target file."""
435 raise NotImplementedError
435 raise NotImplementedError
436
436
437 def writerej(self, fname, failed, total, lines):
437 def writerej(self, fname, failed, total, lines):
438 """Write rejected lines for fname. total is the number of hunks
438 """Write rejected lines for fname. total is the number of hunks
439 which failed to apply and total the total number of hunks for this
439 which failed to apply and total the total number of hunks for this
440 files.
440 files.
441 """
441 """
442 pass
442 pass
443
443
444 def exists(self, fname):
444 def exists(self, fname):
445 raise NotImplementedError
445 raise NotImplementedError
446
446
447 class fsbackend(abstractbackend):
447 class fsbackend(abstractbackend):
448 def __init__(self, ui, basedir):
448 def __init__(self, ui, basedir):
449 super(fsbackend, self).__init__(ui)
449 super(fsbackend, self).__init__(ui)
450 self.opener = scmutil.opener(basedir)
450 self.opener = scmutil.opener(basedir)
451
451
452 def _join(self, f):
452 def _join(self, f):
453 return os.path.join(self.opener.base, f)
453 return os.path.join(self.opener.base, f)
454
454
455 def getfile(self, fname):
455 def getfile(self, fname):
456 if self.opener.islink(fname):
456 if self.opener.islink(fname):
457 return (self.opener.readlink(fname), (True, False))
457 return (self.opener.readlink(fname), (True, False))
458
458
459 isexec = False
459 isexec = False
460 try:
460 try:
461 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
461 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
462 except OSError as e:
462 except OSError as e:
463 if e.errno != errno.ENOENT:
463 if e.errno != errno.ENOENT:
464 raise
464 raise
465 try:
465 try:
466 return (self.opener.read(fname), (False, isexec))
466 return (self.opener.read(fname), (False, isexec))
467 except IOError as e:
467 except IOError as e:
468 if e.errno != errno.ENOENT:
468 if e.errno != errno.ENOENT:
469 raise
469 raise
470 return None, None
470 return None, None
471
471
472 def setfile(self, fname, data, mode, copysource):
472 def setfile(self, fname, data, mode, copysource):
473 islink, isexec = mode
473 islink, isexec = mode
474 if data is None:
474 if data is None:
475 self.opener.setflags(fname, islink, isexec)
475 self.opener.setflags(fname, islink, isexec)
476 return
476 return
477 if islink:
477 if islink:
478 self.opener.symlink(data, fname)
478 self.opener.symlink(data, fname)
479 else:
479 else:
480 self.opener.write(fname, data)
480 self.opener.write(fname, data)
481 if isexec:
481 if isexec:
482 self.opener.setflags(fname, False, True)
482 self.opener.setflags(fname, False, True)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self.opener.unlinkpath(fname, ignoremissing=True)
485 self.opener.unlinkpath(fname, ignoremissing=True)
486
486
487 def writerej(self, fname, failed, total, lines):
487 def writerej(self, fname, failed, total, lines):
488 fname = fname + ".rej"
488 fname = fname + ".rej"
489 self.ui.warn(
489 self.ui.warn(
490 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
490 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
491 (failed, total, fname))
491 (failed, total, fname))
492 fp = self.opener(fname, 'w')
492 fp = self.opener(fname, 'w')
493 fp.writelines(lines)
493 fp.writelines(lines)
494 fp.close()
494 fp.close()
495
495
496 def exists(self, fname):
496 def exists(self, fname):
497 return self.opener.lexists(fname)
497 return self.opener.lexists(fname)
498
498
499 class workingbackend(fsbackend):
499 class workingbackend(fsbackend):
500 def __init__(self, ui, repo, similarity):
500 def __init__(self, ui, repo, similarity):
501 super(workingbackend, self).__init__(ui, repo.root)
501 super(workingbackend, self).__init__(ui, repo.root)
502 self.repo = repo
502 self.repo = repo
503 self.similarity = similarity
503 self.similarity = similarity
504 self.removed = set()
504 self.removed = set()
505 self.changed = set()
505 self.changed = set()
506 self.copied = []
506 self.copied = []
507
507
508 def _checkknown(self, fname):
508 def _checkknown(self, fname):
509 if self.repo.dirstate[fname] == '?' and self.exists(fname):
509 if self.repo.dirstate[fname] == '?' and self.exists(fname):
510 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
510 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
511
511
512 def setfile(self, fname, data, mode, copysource):
512 def setfile(self, fname, data, mode, copysource):
513 self._checkknown(fname)
513 self._checkknown(fname)
514 super(workingbackend, self).setfile(fname, data, mode, copysource)
514 super(workingbackend, self).setfile(fname, data, mode, copysource)
515 if copysource is not None:
515 if copysource is not None:
516 self.copied.append((copysource, fname))
516 self.copied.append((copysource, fname))
517 self.changed.add(fname)
517 self.changed.add(fname)
518
518
519 def unlink(self, fname):
519 def unlink(self, fname):
520 self._checkknown(fname)
520 self._checkknown(fname)
521 super(workingbackend, self).unlink(fname)
521 super(workingbackend, self).unlink(fname)
522 self.removed.add(fname)
522 self.removed.add(fname)
523 self.changed.add(fname)
523 self.changed.add(fname)
524
524
525 def close(self):
525 def close(self):
526 wctx = self.repo[None]
526 wctx = self.repo[None]
527 changed = set(self.changed)
527 changed = set(self.changed)
528 for src, dst in self.copied:
528 for src, dst in self.copied:
529 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
529 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
530 if self.removed:
530 if self.removed:
531 wctx.forget(sorted(self.removed))
531 wctx.forget(sorted(self.removed))
532 for f in self.removed:
532 for f in self.removed:
533 if f not in self.repo.dirstate:
533 if f not in self.repo.dirstate:
534 # File was deleted and no longer belongs to the
534 # File was deleted and no longer belongs to the
535 # dirstate, it was probably marked added then
535 # dirstate, it was probably marked added then
536 # deleted, and should not be considered by
536 # deleted, and should not be considered by
537 # marktouched().
537 # marktouched().
538 changed.discard(f)
538 changed.discard(f)
539 if changed:
539 if changed:
540 scmutil.marktouched(self.repo, changed, self.similarity)
540 scmutil.marktouched(self.repo, changed, self.similarity)
541 return sorted(self.changed)
541 return sorted(self.changed)
542
542
543 class filestore(object):
543 class filestore(object):
544 def __init__(self, maxsize=None):
544 def __init__(self, maxsize=None):
545 self.opener = None
545 self.opener = None
546 self.files = {}
546 self.files = {}
547 self.created = 0
547 self.created = 0
548 self.maxsize = maxsize
548 self.maxsize = maxsize
549 if self.maxsize is None:
549 if self.maxsize is None:
550 self.maxsize = 4*(2**20)
550 self.maxsize = 4*(2**20)
551 self.size = 0
551 self.size = 0
552 self.data = {}
552 self.data = {}
553
553
554 def setfile(self, fname, data, mode, copied=None):
554 def setfile(self, fname, data, mode, copied=None):
555 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
555 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
556 self.data[fname] = (data, mode, copied)
556 self.data[fname] = (data, mode, copied)
557 self.size += len(data)
557 self.size += len(data)
558 else:
558 else:
559 if self.opener is None:
559 if self.opener is None:
560 root = tempfile.mkdtemp(prefix='hg-patch-')
560 root = tempfile.mkdtemp(prefix='hg-patch-')
561 self.opener = scmutil.opener(root)
561 self.opener = scmutil.opener(root)
562 # Avoid filename issues with these simple names
562 # Avoid filename issues with these simple names
563 fn = str(self.created)
563 fn = str(self.created)
564 self.opener.write(fn, data)
564 self.opener.write(fn, data)
565 self.created += 1
565 self.created += 1
566 self.files[fname] = (fn, mode, copied)
566 self.files[fname] = (fn, mode, copied)
567
567
568 def getfile(self, fname):
568 def getfile(self, fname):
569 if fname in self.data:
569 if fname in self.data:
570 return self.data[fname]
570 return self.data[fname]
571 if not self.opener or fname not in self.files:
571 if not self.opener or fname not in self.files:
572 return None, None, None
572 return None, None, None
573 fn, mode, copied = self.files[fname]
573 fn, mode, copied = self.files[fname]
574 return self.opener.read(fn), mode, copied
574 return self.opener.read(fn), mode, copied
575
575
576 def close(self):
576 def close(self):
577 if self.opener:
577 if self.opener:
578 shutil.rmtree(self.opener.base)
578 shutil.rmtree(self.opener.base)
579
579
580 class repobackend(abstractbackend):
580 class repobackend(abstractbackend):
581 def __init__(self, ui, repo, ctx, store):
581 def __init__(self, ui, repo, ctx, store):
582 super(repobackend, self).__init__(ui)
582 super(repobackend, self).__init__(ui)
583 self.repo = repo
583 self.repo = repo
584 self.ctx = ctx
584 self.ctx = ctx
585 self.store = store
585 self.store = store
586 self.changed = set()
586 self.changed = set()
587 self.removed = set()
587 self.removed = set()
588 self.copied = {}
588 self.copied = {}
589
589
590 def _checkknown(self, fname):
590 def _checkknown(self, fname):
591 if fname not in self.ctx:
591 if fname not in self.ctx:
592 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
592 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
593
593
594 def getfile(self, fname):
594 def getfile(self, fname):
595 try:
595 try:
596 fctx = self.ctx[fname]
596 fctx = self.ctx[fname]
597 except error.LookupError:
597 except error.LookupError:
598 return None, None
598 return None, None
599 flags = fctx.flags()
599 flags = fctx.flags()
600 return fctx.data(), ('l' in flags, 'x' in flags)
600 return fctx.data(), ('l' in flags, 'x' in flags)
601
601
602 def setfile(self, fname, data, mode, copysource):
602 def setfile(self, fname, data, mode, copysource):
603 if copysource:
603 if copysource:
604 self._checkknown(copysource)
604 self._checkknown(copysource)
605 if data is None:
605 if data is None:
606 data = self.ctx[fname].data()
606 data = self.ctx[fname].data()
607 self.store.setfile(fname, data, mode, copysource)
607 self.store.setfile(fname, data, mode, copysource)
608 self.changed.add(fname)
608 self.changed.add(fname)
609 if copysource:
609 if copysource:
610 self.copied[fname] = copysource
610 self.copied[fname] = copysource
611
611
612 def unlink(self, fname):
612 def unlink(self, fname):
613 self._checkknown(fname)
613 self._checkknown(fname)
614 self.removed.add(fname)
614 self.removed.add(fname)
615
615
616 def exists(self, fname):
616 def exists(self, fname):
617 return fname in self.ctx
617 return fname in self.ctx
618
618
619 def close(self):
619 def close(self):
620 return self.changed | self.removed
620 return self.changed | self.removed
621
621
622 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
622 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
623 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
623 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
624 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
624 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
625 eolmodes = ['strict', 'crlf', 'lf', 'auto']
625 eolmodes = ['strict', 'crlf', 'lf', 'auto']
626
626
627 class patchfile(object):
627 class patchfile(object):
628 def __init__(self, ui, gp, backend, store, eolmode='strict'):
628 def __init__(self, ui, gp, backend, store, eolmode='strict'):
629 self.fname = gp.path
629 self.fname = gp.path
630 self.eolmode = eolmode
630 self.eolmode = eolmode
631 self.eol = None
631 self.eol = None
632 self.backend = backend
632 self.backend = backend
633 self.ui = ui
633 self.ui = ui
634 self.lines = []
634 self.lines = []
635 self.exists = False
635 self.exists = False
636 self.missing = True
636 self.missing = True
637 self.mode = gp.mode
637 self.mode = gp.mode
638 self.copysource = gp.oldpath
638 self.copysource = gp.oldpath
639 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
639 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
640 self.remove = gp.op == 'DELETE'
640 self.remove = gp.op == 'DELETE'
641 if self.copysource is None:
641 if self.copysource is None:
642 data, mode = backend.getfile(self.fname)
642 data, mode = backend.getfile(self.fname)
643 else:
643 else:
644 data, mode = store.getfile(self.copysource)[:2]
644 data, mode = store.getfile(self.copysource)[:2]
645 if data is not None:
645 if data is not None:
646 self.exists = self.copysource is None or backend.exists(self.fname)
646 self.exists = self.copysource is None or backend.exists(self.fname)
647 self.missing = False
647 self.missing = False
648 if data:
648 if data:
649 self.lines = mdiff.splitnewlines(data)
649 self.lines = mdiff.splitnewlines(data)
650 if self.mode is None:
650 if self.mode is None:
651 self.mode = mode
651 self.mode = mode
652 if self.lines:
652 if self.lines:
653 # Normalize line endings
653 # Normalize line endings
654 if self.lines[0].endswith('\r\n'):
654 if self.lines[0].endswith('\r\n'):
655 self.eol = '\r\n'
655 self.eol = '\r\n'
656 elif self.lines[0].endswith('\n'):
656 elif self.lines[0].endswith('\n'):
657 self.eol = '\n'
657 self.eol = '\n'
658 if eolmode != 'strict':
658 if eolmode != 'strict':
659 nlines = []
659 nlines = []
660 for l in self.lines:
660 for l in self.lines:
661 if l.endswith('\r\n'):
661 if l.endswith('\r\n'):
662 l = l[:-2] + '\n'
662 l = l[:-2] + '\n'
663 nlines.append(l)
663 nlines.append(l)
664 self.lines = nlines
664 self.lines = nlines
665 else:
665 else:
666 if self.create:
666 if self.create:
667 self.missing = False
667 self.missing = False
668 if self.mode is None:
668 if self.mode is None:
669 self.mode = (False, False)
669 self.mode = (False, False)
670 if self.missing:
670 if self.missing:
671 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
671 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
672 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
672 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
673 "current directory)\n"))
673 "current directory)\n"))
674
674
675 self.hash = {}
675 self.hash = {}
676 self.dirty = 0
676 self.dirty = 0
677 self.offset = 0
677 self.offset = 0
678 self.skew = 0
678 self.skew = 0
679 self.rej = []
679 self.rej = []
680 self.fileprinted = False
680 self.fileprinted = False
681 self.printfile(False)
681 self.printfile(False)
682 self.hunks = 0
682 self.hunks = 0
683
683
684 def writelines(self, fname, lines, mode):
684 def writelines(self, fname, lines, mode):
685 if self.eolmode == 'auto':
685 if self.eolmode == 'auto':
686 eol = self.eol
686 eol = self.eol
687 elif self.eolmode == 'crlf':
687 elif self.eolmode == 'crlf':
688 eol = '\r\n'
688 eol = '\r\n'
689 else:
689 else:
690 eol = '\n'
690 eol = '\n'
691
691
692 if self.eolmode != 'strict' and eol and eol != '\n':
692 if self.eolmode != 'strict' and eol and eol != '\n':
693 rawlines = []
693 rawlines = []
694 for l in lines:
694 for l in lines:
695 if l and l[-1] == '\n':
695 if l and l[-1] == '\n':
696 l = l[:-1] + eol
696 l = l[:-1] + eol
697 rawlines.append(l)
697 rawlines.append(l)
698 lines = rawlines
698 lines = rawlines
699
699
700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
701
701
702 def printfile(self, warn):
702 def printfile(self, warn):
703 if self.fileprinted:
703 if self.fileprinted:
704 return
704 return
705 if warn or self.ui.verbose:
705 if warn or self.ui.verbose:
706 self.fileprinted = True
706 self.fileprinted = True
707 s = _("patching file %s\n") % self.fname
707 s = _("patching file %s\n") % self.fname
708 if warn:
708 if warn:
709 self.ui.warn(s)
709 self.ui.warn(s)
710 else:
710 else:
711 self.ui.note(s)
711 self.ui.note(s)
712
712
713
713
714 def findlines(self, l, linenum):
714 def findlines(self, l, linenum):
715 # looks through the hash and finds candidate lines. The
715 # looks through the hash and finds candidate lines. The
716 # result is a list of line numbers sorted based on distance
716 # result is a list of line numbers sorted based on distance
717 # from linenum
717 # from linenum
718
718
719 cand = self.hash.get(l, [])
719 cand = self.hash.get(l, [])
720 if len(cand) > 1:
720 if len(cand) > 1:
721 # resort our list of potentials forward then back.
721 # resort our list of potentials forward then back.
722 cand.sort(key=lambda x: abs(x - linenum))
722 cand.sort(key=lambda x: abs(x - linenum))
723 return cand
723 return cand
724
724
def write_rej(self):
    """Hand the accumulated rejected hunks to the backend.

    Our rejects differ a little from patch(1): they keep the same form
    as the original patch, and a minimal ---/+++ header naming the file
    is prepended so the reject can be re-fed to patch without typing
    the filename again.  Does nothing when there are no rejects.
    """
    if not self.rej:
        return
    base = os.path.basename(self.fname)
    out = ["--- %s\n+++ %s\n" % (base, base)]
    for rejected in self.rej:
        for line in rejected.hunk:
            out.append(line)
            # flag hunk lines lacking a trailing newline, diff-style
            if line[-1] != '\n':
                out.append("\n\ No newline at end of file\n")
    self.backend.writerej(self.fname, len(self.rej), self.hunks, out)
740
740
def apply(self, h):
    """Apply hunk ``h`` to the currently loaded file content.

    Returns 0 when the hunk applied cleanly, a positive fuzz amount
    when it only applied with fuzz, and -1 when it was rejected (the
    hunk is then appended to ``self.rej``).
    """
    if not h.complete():
        raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                         (h.number, h.desc, len(h.a), h.lena, len(h.b),
                          h.lenb))

    # running count of hunks seen for this file; reported via writerej
    self.hunks += 1

    # target file is missing entirely: nothing to patch against
    if self.missing:
        self.rej.append(h)
        return -1

    # refuse to (re)create a file that is already there
    if self.exists and self.create:
        if self.copysource:
            self.ui.warn(_("cannot create %s: destination already "
                           "exists\n") % self.fname)
        else:
            self.ui.warn(_("file %s already exists\n") % self.fname)
        self.rej.append(h)
        return -1

    # binary hunks replace the whole line list (or delete the file)
    if isinstance(h, binhunk):
        if self.remove:
            self.backend.unlink(self.fname)
        else:
            l = h.new(self.lines)
            self.lines[:] = l
            self.offset += len(l)
            self.dirty = True
        return 0

    # keep the unnormalized hunk around: rejects must preserve the
    # input patch verbatim
    horig = h
    if (self.eolmode in ('crlf', 'lf')
        or self.eolmode == 'auto' and self.eol):
        # If new eols are going to be normalized, then normalize
        # hunk data before patching. Otherwise, preserve input
        # line-endings.
        h = h.getnormalized()

    # fast case first, no offsets, no fuzz
    old, oldstart, new, newstart = h.fuzzit(0, False)
    oldstart += self.offset
    orig_start = oldstart
    # if there's skew we want to emit the "(offset %d lines)" even
    # when the hunk cleanly applies at start + skew, so skip the
    # fast case code
    if (self.skew == 0 and
        diffhelpers.testhunk(old, self.lines, oldstart) == 0):
        if self.remove:
            self.backend.unlink(self.fname)
        else:
            self.lines[oldstart:oldstart + len(old)] = new
            self.offset += len(new) - len(old)
            self.dirty = True
        return 0

    # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
    # index every current line by content so candidates can be found
    self.hash = {}
    for x, s in enumerate(self.lines):
        self.hash.setdefault(s, []).append(x)

    # try increasing fuzz; at each level try trimming top context only,
    # then both ends
    for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
        for toponly in [True, False]:
            old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
            oldstart = oldstart + self.offset + self.skew
            oldstart = min(oldstart, len(self.lines))
            if old:
                cand = self.findlines(old[0][1:], oldstart)
            else:
                # Only adding lines with no or fuzzed context, just
                # take the skew in account
                cand = [oldstart]

            for l in cand:
                if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                    self.lines[l : l + len(old)] = new
                    self.offset += len(new) - len(old)
                    # remember the displacement for following hunks
                    self.skew = l - orig_start
                    self.dirty = True
                    offset = l - orig_start - fuzzlen
                    if fuzzlen:
                        msg = _("Hunk #%d succeeded at %d "
                                "with fuzz %d "
                                "(offset %d lines).\n")
                        self.printfile(True)
                        self.ui.warn(msg %
                            (h.number, l + 1, fuzzlen, offset))
                    else:
                        msg = _("Hunk #%d succeeded at %d "
                                "(offset %d lines).\n")
                        self.ui.note(msg % (h.number, l + 1, offset))
                    return fuzzlen
    # every fuzz level exhausted: reject the original (unnormalized) hunk
    self.printfile(True)
    self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
    self.rej.append(horig)
    return -1
837
837
def close(self):
    """Finalize patching of this file.

    Modified content is flushed back first (only when dirty), then any
    rejected hunks are written out.  Returns the number of rejects so
    the caller can tally failures.
    """
    if self.dirty:
        self.writelines(self.fname, self.lines, self.mode)
    self.write_rej()
    return len(self.rej)
843
843
class header(object):
    """One file header of a parsed patch, together with its hunks."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when an 'index ' line marks this as binary content."""
        return any(l.startswith('index ') for l in self.header)

    def pretty(self, fp):
        """Write a human-oriented rendering of the header to ``fp``."""
        for l in self.header:
            if l.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(l):
                fp.write(l)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if l.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(l)

    def write(self, fp):
        """Write the raw header lines to ``fp``."""
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the change must be taken whole (binary/deleted)."""
        return any(self.allhunks_re.match(l) for l in self.header)

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            # plain "diff -r ..." header: one file name
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        """Return the destination file name."""
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        """True when the header announces a newly added file."""
        return any(self.newfile_re.match(l) for l in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level; e.g. a deleted file is special: the user takes the
        # whole operation or leaves it, never part of it.
        # A newly added file is special only when it is empty (a bare
        # two-line header); with content the user may still pick hunks.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return (emptynewfile
                or any(self.special_re.match(l) for l in self.header))
915
915
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(lineno, lines):
            # Context trimming is currently disabled: the guard below is
            # never true, so lines pass through unchanged.
            surplus = len(lines) - self.maxcontext
            if False and surplus > 0:
                return lineno + surplus, lines[:self.maxcontext]
            return lineno, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = len([l for l in hunk if l[0] == '+'])
        removed = len([l for l in hunk if l[0] == '-'])
        return added, removed

    def write(self, fp):
        """Emit this hunk in unified-diff form to ``fp``."""
        delta = len(self.before) + len(self.after)
        # the no-eol marker is not counted in the hunk line ranges
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
976
976
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    # operation selects the wording of the prompts below; 'record' is
    # the historical default
    if operation is None:
        operation = 'record'
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        # newpatches is only populated by the interactive edit branch
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                # whole-file and binary chunks cannot be edited hunk-wise
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    # pos/total drive the "change %d/%d" numbering across all headers
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        # do not prompt twice for an identical header
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # all-or-nothing change: take every hunk along with the header
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted later line numbers
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # user edited the hunk: substitute the parsed replacement(s)
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # skipped hunk: remember the line delta it would have caused
                fixoffset += chunk.removed - chunk.added
    # keep only files with at least one applied hunk, or special headers
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
1144 class hunk(object):
1144 class hunk(object):
def __init__(self, desc, num, lr, context):
    """Build hunk number ``num`` described by header line ``desc``.

    When a line reader ``lr`` is supplied, the hunk body is parsed from
    it immediately -- as a context diff when ``context`` is true,
    otherwise as a unified diff.  With ``lr`` None the fields stay
    empty for manual population (see getnormalized).
    """
    self.number = num
    self.desc = desc
    self.hunk = [desc]
    self.a = []
    self.b = []
    self.starta = self.lena = None
    self.startb = self.lenb = None
    if lr is None:
        return
    if context:
        self.read_context_hunk(lr)
    else:
        self.read_unified_hunk(lr)
1158
1158
def getnormalized(self):
    """Return a copy with line endings normalized to LF."""

    def tolf(lines):
        # only a trailing \r\n is rewritten; everything else is kept
        return [l[:-2] + '\n' if l.endswith('\r\n') else l
                for l in lines]

    # Build an empty shell (lr=None skips parsing) and copy the fields
    # over by hand, normalizing only the old (a) and new (b) line lists.
    nh = hunk(self.desc, self.number, None, None)
    nh.number = self.number
    nh.desc = self.desc
    nh.hunk = self.hunk
    nh.a = tolf(self.a)
    nh.b = tolf(self.b)
    nh.starta = self.starta
    nh.startb = self.startb
    nh.lena = self.lena
    nh.lenb = self.lenb
    return nh
1182
1182
def read_unified_hunk(self, lr):
    """Parse a unified-diff hunk body from line reader ``lr``.

    Fills self.a/self.b (old/new lines) and the start/length fields
    from the "@@ -l[,s] +l[,s] @@" description in self.desc.
    Raises PatchError when the description does not parse.
    """
    m = unidesc.match(self.desc)
    if not m:
        raise PatchError(_("bad hunk #%d") % self.number)
    self.starta, self.lena, self.startb, self.lenb = m.groups()
    # an omitted length in the @@ range means exactly one line
    if self.lena is None:
        self.lena = 1
    else:
        self.lena = int(self.lena)
    if self.lenb is None:
        self.lenb = 1
    else:
        self.lenb = int(self.lenb)
    self.starta = int(self.starta)
    self.startb = int(self.startb)
    diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                         self.b)
    # if we hit eof before finishing out the hunk, the last line will
    # be zero length. Lets try to fix it up.
    while len(self.hunk[-1]) == 0:
        del self.hunk[-1]
        del self.a[-1]
        del self.b[-1]
        self.lena -= 1
        self.lenb -= 1
    self._fixnewline(lr)
1209
1209
1210 def read_context_hunk(self, lr):
1210 def read_context_hunk(self, lr):
1211 self.desc = lr.readline()
1211 self.desc = lr.readline()
1212 m = contextdesc.match(self.desc)
1212 m = contextdesc.match(self.desc)
1213 if not m:
1213 if not m:
1214 raise PatchError(_("bad hunk #%d") % self.number)
1214 raise PatchError(_("bad hunk #%d") % self.number)
1215 self.starta, aend = m.groups()
1215 self.starta, aend = m.groups()
1216 self.starta = int(self.starta)
1216 self.starta = int(self.starta)
1217 if aend is None:
1217 if aend is None:
1218 aend = self.starta
1218 aend = self.starta
1219 self.lena = int(aend) - self.starta
1219 self.lena = int(aend) - self.starta
1220 if self.starta:
1220 if self.starta:
1221 self.lena += 1
1221 self.lena += 1
1222 for x in xrange(self.lena):
1222 for x in xrange(self.lena):
1223 l = lr.readline()
1223 l = lr.readline()
1224 if l.startswith('---'):
1224 if l.startswith('---'):
1225 # lines addition, old block is empty
1225 # lines addition, old block is empty
1226 lr.push(l)
1226 lr.push(l)
1227 break
1227 break
1228 s = l[2:]
1228 s = l[2:]
1229 if l.startswith('- ') or l.startswith('! '):
1229 if l.startswith('- ') or l.startswith('! '):
1230 u = '-' + s
1230 u = '-' + s
1231 elif l.startswith(' '):
1231 elif l.startswith(' '):
1232 u = ' ' + s
1232 u = ' ' + s
1233 else:
1233 else:
1234 raise PatchError(_("bad hunk #%d old text line %d") %
1234 raise PatchError(_("bad hunk #%d old text line %d") %
1235 (self.number, x))
1235 (self.number, x))
1236 self.a.append(u)
1236 self.a.append(u)
1237 self.hunk.append(u)
1237 self.hunk.append(u)
1238
1238
1239 l = lr.readline()
1239 l = lr.readline()
1240 if l.startswith('\ '):
1240 if l.startswith('\ '):
1241 s = self.a[-1][:-1]
1241 s = self.a[-1][:-1]
1242 self.a[-1] = s
1242 self.a[-1] = s
1243 self.hunk[-1] = s
1243 self.hunk[-1] = s
1244 l = lr.readline()
1244 l = lr.readline()
1245 m = contextdesc.match(l)
1245 m = contextdesc.match(l)
1246 if not m:
1246 if not m:
1247 raise PatchError(_("bad hunk #%d") % self.number)
1247 raise PatchError(_("bad hunk #%d") % self.number)
1248 self.startb, bend = m.groups()
1248 self.startb, bend = m.groups()
1249 self.startb = int(self.startb)
1249 self.startb = int(self.startb)
1250 if bend is None:
1250 if bend is None:
1251 bend = self.startb
1251 bend = self.startb
1252 self.lenb = int(bend) - self.startb
1252 self.lenb = int(bend) - self.startb
1253 if self.startb:
1253 if self.startb:
1254 self.lenb += 1
1254 self.lenb += 1
1255 hunki = 1
1255 hunki = 1
1256 for x in xrange(self.lenb):
1256 for x in xrange(self.lenb):
1257 l = lr.readline()
1257 l = lr.readline()
1258 if l.startswith('\ '):
1258 if l.startswith('\ '):
1259 # XXX: the only way to hit this is with an invalid line range.
1259 # XXX: the only way to hit this is with an invalid line range.
1260 # The no-eol marker is not counted in the line range, but I
1260 # The no-eol marker is not counted in the line range, but I
1261 # guess there are diff(1) out there which behave differently.
1261 # guess there are diff(1) out there which behave differently.
1262 s = self.b[-1][:-1]
1262 s = self.b[-1][:-1]
1263 self.b[-1] = s
1263 self.b[-1] = s
1264 self.hunk[hunki - 1] = s
1264 self.hunk[hunki - 1] = s
1265 continue
1265 continue
1266 if not l:
1266 if not l:
1267 # line deletions, new block is empty and we hit EOF
1267 # line deletions, new block is empty and we hit EOF
1268 lr.push(l)
1268 lr.push(l)
1269 break
1269 break
1270 s = l[2:]
1270 s = l[2:]
1271 if l.startswith('+ ') or l.startswith('! '):
1271 if l.startswith('+ ') or l.startswith('! '):
1272 u = '+' + s
1272 u = '+' + s
1273 elif l.startswith(' '):
1273 elif l.startswith(' '):
1274 u = ' ' + s
1274 u = ' ' + s
1275 elif len(self.b) == 0:
1275 elif len(self.b) == 0:
1276 # line deletions, new block is empty
1276 # line deletions, new block is empty
1277 lr.push(l)
1277 lr.push(l)
1278 break
1278 break
1279 else:
1279 else:
1280 raise PatchError(_("bad hunk #%d old text line %d") %
1280 raise PatchError(_("bad hunk #%d old text line %d") %
1281 (self.number, x))
1281 (self.number, x))
1282 self.b.append(s)
1282 self.b.append(s)
1283 while True:
1283 while True:
1284 if hunki >= len(self.hunk):
1284 if hunki >= len(self.hunk):
1285 h = ""
1285 h = ""
1286 else:
1286 else:
1287 h = self.hunk[hunki]
1287 h = self.hunk[hunki]
1288 hunki += 1
1288 hunki += 1
1289 if h == u:
1289 if h == u:
1290 break
1290 break
1291 elif h.startswith('-'):
1291 elif h.startswith('-'):
1292 continue
1292 continue
1293 else:
1293 else:
1294 self.hunk.insert(hunki - 1, u)
1294 self.hunk.insert(hunki - 1, u)
1295 break
1295 break
1296
1296
1297 if not self.a:
1297 if not self.a:
1298 # this happens when lines were only added to the hunk
1298 # this happens when lines were only added to the hunk
1299 for x in self.hunk:
1299 for x in self.hunk:
1300 if x.startswith('-') or x.startswith(' '):
1300 if x.startswith('-') or x.startswith(' '):
1301 self.a.append(x)
1301 self.a.append(x)
1302 if not self.b:
1302 if not self.b:
1303 # this happens when lines were only deleted from the hunk
1303 # this happens when lines were only deleted from the hunk
1304 for x in self.hunk:
1304 for x in self.hunk:
1305 if x.startswith('+') or x.startswith(' '):
1305 if x.startswith('+') or x.startswith(' '):
1306 self.b.append(x[1:])
1306 self.b.append(x[1:])
1307 # @@ -start,len +start,len @@
1307 # @@ -start,len +start,len @@
1308 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1308 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1309 self.startb, self.lenb)
1309 self.startb, self.lenb)
1310 self.hunk[0] = self.desc
1310 self.hunk[0] = self.desc
1311 self._fixnewline(lr)
1311 self._fixnewline(lr)
1312
1312
1313 def _fixnewline(self, lr):
1313 def _fixnewline(self, lr):
1314 l = lr.readline()
1314 l = lr.readline()
1315 if l.startswith('\ '):
1315 if l.startswith('\ '):
1316 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1316 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1317 else:
1317 else:
1318 lr.push(l)
1318 lr.push(l)
1319
1319
1320 def complete(self):
1320 def complete(self):
1321 return len(self.a) == self.lena and len(self.b) == self.lenb
1321 return len(self.a) == self.lena and len(self.b) == self.lenb
1322
1322
    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        #
        # Returns (trimmed_old, trimmed_new, lines_trimmed_from_top).
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            # count leading context (' ') lines in the hunk body
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                # count trailing context lines, scanning upward from the end
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # never trim more than 'fuzz' lines from either end
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0
1349
1349
1350 def fuzzit(self, fuzz, toponly):
1350 def fuzzit(self, fuzz, toponly):
1351 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1351 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1352 oldstart = self.starta + top
1352 oldstart = self.starta + top
1353 newstart = self.startb + top
1353 newstart = self.startb + top
1354 # zero length hunk ranges already have their start decremented
1354 # zero length hunk ranges already have their start decremented
1355 if self.lena and oldstart > 0:
1355 if self.lena and oldstart > 0:
1356 oldstart -= 1
1356 oldstart -= 1
1357 if self.lenb and newstart > 0:
1357 if self.lenb and newstart > 0:
1358 newstart -= 1
1358 newstart -= 1
1359 return old, oldstart, new, newstart
1359 return old, oldstart, new, newstart
1360
1360
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read succeeds
        self.text = None
        # True when the payload is a delta against existing content rather
        # than a full literal replacement
        self.delta = False
        # raw hunk lines as read, kept so the patch can be re-emitted
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # parsing succeeded iff a payload was decoded
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        'lines' is the current content; it is only consulted when this
        hunk is a delta that must be applied on top of it.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a git binary hunk (base85-encoded, zlib-compressed)."""
        def getline(lr, hunk):
            # read one raw line, remember it for round-tripping, and
            # return it without the line terminator
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # the first character encodes the decoded length of the line:
            # 'A'-'Z' -> 1-26 bytes, anything else (expected 'a'-'z') ->
            # 27-52 bytes, per git's binary patch encoding
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # size mismatch means the input was truncated or corrupt
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1416
1416
1417 def parsefilename(str):
1417 def parsefilename(str):
1418 # --- filename \t|space stuff
1418 # --- filename \t|space stuff
1419 s = str[4:].rstrip('\r\n')
1419 s = str[4:].rstrip('\r\n')
1420 i = s.find('\t')
1420 i = s.find('\t')
1421 if i < 0:
1421 if i < 0:
1422 i = s.find(' ')
1422 i = s.find(' ')
1423 if i < 0:
1423 if i < 0:
1424 return s
1424 return s
1425 return s[:i]
1425 return s[:i]
1426
1426
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    # imported here rather than at module level to avoid an import cycle
    # NOTE(review): presumed cycle-avoidance -- confirm before hoisting
    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # flip the sign of every changed line (headers pass through
            # untouched); the hunk lists are mutated in place
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            # reversing a hunk swaps its added/removed line counts
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
1498
1498
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0   # current line number on the old side
            self.toline = 0     # current line number on the new side
            self.proc = ''      # function context text from the @@ line
            self.header = None  # header currently collecting hunks
            self.context = []   # trailing context of the previous hunk
            self.before = []    # leading context of the pending hunk
            self.hunk = []      # changed lines of the pending hunk
            self.headers = []   # completed headers, in input order

        def addrange(self, limits):
            # record the @@ -from,len +to,len @@ numbers; the lengths
            # (fromend/toend) are recomputed later and ignored here
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context closes the pending hunk: materialize it and advance
            # the line counters by the lines it consumed/produced
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the context seen so far becomes the leading context of the
            # hunk that just started
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush the pending hunk of the previous file first
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the final pending hunk
            self.addcontext([])
            return self.headers

        # legal state transitions; entries are unbound methods, hence the
        # explicit (p, data) call below
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1581
1581
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for done in range(strip):
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        pos += 1
        # runs of slashes count as a single component separator
        while pos < end - 1 and path[pos] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1618 return path[:i].lstrip(), prefix + path[i:].rstrip()
1619
1619
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Pick the target file for a plain-diff hunk and build its patchmeta.

    'backend' abstracts the patch target and must provide exists(fname).
    Returns a patchmeta whose op is 'ADD' for file creations and 'DELETE'
    for removals; raises PatchError when no usable file name can be
    derived from either side of the diff.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation/removal hunk has a zero range on the missing side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on whichever side names a real file, preferring the
        # new side unless this looks like a file/backup diff
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1674
1674
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        # iter(..., '') stops at EOF (readline returns '')
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push it back for the caller's next read
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # '\\' lines are the 'No newline at end of file' marker
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1720
1720
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can rename or copy a file and then modify the result, so
    the copies and renames must be known (and performed) before any hunk
    is applied: by then the source may already have been renamed or
    changed.  Read the remaining input through a scratch linereader,
    collect the git metadata, then rewind so normal parsing can proceed.
    """
    try:
        restorepos = lr.fp.tell()
        scanfp = lr.fp
    except IOError:
        # unseekable stream: buffer everything in memory instead
        restorepos = 0
        scanfp = stringio(lr.fp.read())
    gitlr = linereader(scanfp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    scanfp.seek(restorepos)
    return gitpatches
1746
1746
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # pending git patchmeta records, reversed so .pop() yields input order
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor unknown yet; True: context diff; False: unified
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of the file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless entries (pure renames/copies/mode changes)
            # that precede the current file in the metadata queue
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # a new target file was selected: emit 'file' lazily, when
            # (and if) its first hunk arrives
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunkless git entries at EOF
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1842
1842
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def _skipheader(chunk):
        # a delta length header is a varint: it ends at the first byte
        # whose high bit is clear
        n = 0
        for ch in chunk:
            n += 1
            if not (ord(ch) & 0x80):
                break
        return n
    # drop the two leading size headers (source size, target size)
    binchunk = binchunk[_skipheader(binchunk):]
    binchunk = binchunk[_skipheader(binchunk):]
    pieces = []
    pos = 0
    end = len(binchunk)
    while pos < end:
        cmd = ord(binchunk[pos])
        pos += 1
        if cmd & 0x80:
            # copy opcode: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[pos]) << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[pos]) << shift
                    pos += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif cmd != 0:
            # literal insert: the next cmd bytes are copied verbatim
            pieces.append(binchunk[pos:pos + cmd])
            pos += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return "".join(pieces)
1898
1898
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it via the builtin patcher.

    The return value is 0 when the patch applied cleanly, 1 when it
    applied with fuzz, and -1 when any rejects were found.

    With eolmode 'strict' both the patch content and the patched files
    are read in binary mode. Any other mode ignores line endings while
    patching and normalizes them according to 'eolmode' afterwards.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1911
1911
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Core patch-application loop shared with applydiff().

    Consumes the ('file' / 'hunk' / 'git') event stream produced by
    iterhunks(fp) and applies it through <backend>, reading source file
    contents for copies/renames from <store>. <patcher> is the per-file
    patching class (normally patchfile).

    Returns 0 on a clean apply, 1 if any hunk applied with fuzz, and -1
    if any hunk was rejected.
    """

    if prefix:
        # normalize the prefix relative to the repo root and make sure it
        # ends with a slash so it can be prepended to stripped paths
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip (strip - 1) leading components and apply the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file this hunk belongs to was rejected earlier
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # close out the previous file before starting a new one
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize the metadata from the headers
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no content hunks)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # the whole file is rejected; following hunks are skipped
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any hunk
            # is applied, so later renames see the original contents
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2001
2001
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # initialize up front: the external patcher may emit a fuzz or
        # FAILED line before any 'patching file' line, and the previous
        # code then crashed on an unbound 'pf'/'printed_file'
        pf = None
        printed_file = False
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                # print the file name once before its first warning
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # record added/removed files, detecting renames by similarity
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2043
2043
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj through the given backend.

    Returns True when the patch applied with fuzz, False when it applied
    cleanly; raises PatchError when any hunk was rejected. Paths touched
    by the patch are added to <files>.
    """
    files = set() if files is None else files
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # patchobj was not a path: treat it as a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2070
2070
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory using the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2077
2077
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of changeset <ctx>, recording results in <store>.

    Counterpart of internalpatch() that writes through a repobackend
    built from <ctx> and <store> instead of the working directory.
    Returns whether the patch was applied with fuzz factor.
    """
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2082
2082
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured ui.patch command takes precedence over the
    # builtin patcher
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip, files,
                          similarity)
2104
2104
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                # strip leading components from recorded git metadata paths
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   '')
            changed.add(gp.path)
            if gp.op == 'RENAME':
                # a rename touches the old name as well
                changed.add(gp.oldpath)
    return changed
2125
2125
class GitDiffRequired(Exception):
    """Raised (see diff()) when a change cannot be represented in the
    plain patch format and the output must be upgraded to git format."""
    pass
2128
2128
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''Build diffopts with every feature group (git, whitespace and
    format-changing options) enabled and parsed.'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# backward-compatible alias for callers still using the old name
diffopts = diffallopts
2135
2135
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command line opts win over config, but only when actually set
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                # range-check only when the value really was an integer;
                # checking in the finally block compared hlen while it
                # could still be None (e.g. for 'none'), which broke the
                # '%d' formatting below
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # word values map to fixed lengths; 'none' leaves hlen as
                # None, which disables the index header
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2183
2208
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small (20-entry) LRU of filelog objects keyed by file name, so
        # repeated lookups of the same file reuse the revlog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # full hashes in debug mode, short ones otherwise
    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        # copy detection is only needed for formats that can express it
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                    if dst.startswith(relroot)
                    and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # NOTE: deleting from copy while iterating items() relies on py2's
    # items() returning a list snapshot
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # losedatafn decides whether losing data for fn is acceptable;
                # if not, upgrade the whole diff to git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2312
2337
2313 def difflabel(func, *args, **kw):
2338 def difflabel(func, *args, **kw):
2314 '''yields 2-tuples of (output, label) based on the output of func()'''
2339 '''yields 2-tuples of (output, label) based on the output of func()'''
2315 headprefixes = [('diff', 'diff.diffline'),
2340 headprefixes = [('diff', 'diff.diffline'),
2316 ('copy', 'diff.extended'),
2341 ('copy', 'diff.extended'),
2317 ('rename', 'diff.extended'),
2342 ('rename', 'diff.extended'),
2318 ('old', 'diff.extended'),
2343 ('old', 'diff.extended'),
2319 ('new', 'diff.extended'),
2344 ('new', 'diff.extended'),
2320 ('deleted', 'diff.extended'),
2345 ('deleted', 'diff.extended'),
2321 ('---', 'diff.file_a'),
2346 ('---', 'diff.file_a'),
2322 ('+++', 'diff.file_b')]
2347 ('+++', 'diff.file_b')]
2323 textprefixes = [('@', 'diff.hunk'),
2348 textprefixes = [('@', 'diff.hunk'),
2324 ('-', 'diff.deleted'),
2349 ('-', 'diff.deleted'),
2325 ('+', 'diff.inserted')]
2350 ('+', 'diff.inserted')]
2326 head = False
2351 head = False
2327 for chunk in func(*args, **kw):
2352 for chunk in func(*args, **kw):
2328 lines = chunk.split('\n')
2353 lines = chunk.split('\n')
2329 for i, line in enumerate(lines):
2354 for i, line in enumerate(lines):
2330 if i != 0:
2355 if i != 0:
2331 yield ('\n', '')
2356 yield ('\n', '')
2332 if head:
2357 if head:
2333 if line.startswith('@'):
2358 if line.startswith('@'):
2334 head = False
2359 head = False
2335 else:
2360 else:
2336 if line and line[0] not in ' +-@\\':
2361 if line and line[0] not in ' +-@\\':
2337 head = True
2362 head = True
2338 stripline = line
2363 stripline = line
2339 diffline = False
2364 diffline = False
2340 if not head and line and line[0] in '+-':
2365 if not head and line and line[0] in '+-':
2341 # highlight tabs and trailing whitespace, but only in
2366 # highlight tabs and trailing whitespace, but only in
2342 # changed lines
2367 # changed lines
2343 stripline = line.rstrip()
2368 stripline = line.rstrip()
2344 diffline = True
2369 diffline = True
2345
2370
2346 prefixes = textprefixes
2371 prefixes = textprefixes
2347 if head:
2372 if head:
2348 prefixes = headprefixes
2373 prefixes = headprefixes
2349 for prefix, label in prefixes:
2374 for prefix, label in prefixes:
2350 if stripline.startswith(prefix):
2375 if stripline.startswith(prefix):
2351 if diffline:
2376 if diffline:
2352 for token in tabsplitter.findall(stripline):
2377 for token in tabsplitter.findall(stripline):
2353 if '\t' == token[0]:
2378 if '\t' == token[0]:
2354 yield (token, 'diff.tab')
2379 yield (token, 'diff.tab')
2355 else:
2380 else:
2356 yield (token, label)
2381 yield (token, label)
2357 else:
2382 else:
2358 yield (stripline, label)
2383 yield (stripline, label)
2359 break
2384 break
2360 else:
2385 else:
2361 yield (line, '')
2386 yield (line, '')
2362 if line != stripline:
2387 if line != stripline:
2363 yield (line[len(stripline):], 'diff.trailingwhitespace')
2388 yield (line[len(stripline):], 'diff.trailingwhitespace')
2364
2389
2365 def diffui(*args, **kw):
2390 def diffui(*args, **kw):
2366 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2391 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2367 return difflabel(diff, *args, **kw)
2392 return difflabel(diff, *args, **kw)
2368
2393
2369 def _filepairs(modified, added, removed, copy, opts):
2394 def _filepairs(modified, added, removed, copy, opts):
2370 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2395 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2371 before and f2 is the the name after. For added files, f1 will be None,
2396 before and f2 is the the name after. For added files, f1 will be None,
2372 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2397 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2373 or 'rename' (the latter two only if opts.git is set).'''
2398 or 'rename' (the latter two only if opts.git is set).'''
2374 gone = set()
2399 gone = set()
2375
2400
2376 copyto = dict([(v, k) for k, v in copy.items()])
2401 copyto = dict([(v, k) for k, v in copy.items()])
2377
2402
2378 addedset, removedset = set(added), set(removed)
2403 addedset, removedset = set(added), set(removed)
2379
2404
2380 for f in sorted(modified + added + removed):
2405 for f in sorted(modified + added + removed):
2381 copyop = None
2406 copyop = None
2382 f1, f2 = f, f
2407 f1, f2 = f, f
2383 if f in addedset:
2408 if f in addedset:
2384 f1 = None
2409 f1 = None
2385 if f in copy:
2410 if f in copy:
2386 if opts.git:
2411 if opts.git:
2387 f1 = copy[f]
2412 f1 = copy[f]
2388 if f1 in removedset and f1 not in gone:
2413 if f1 in removedset and f1 not in gone:
2389 copyop = 'rename'
2414 copyop = 'rename'
2390 gone.add(f1)
2415 gone.add(f1)
2391 else:
2416 else:
2392 copyop = 'copy'
2417 copyop = 'copy'
2393 elif f in removedset:
2418 elif f in removedset:
2394 f2 = None
2419 f2 = None
2395 if opts.git:
2420 if opts.git:
2396 # have we already reported a copy above?
2421 # have we already reported a copy above?
2397 if (f in copyto and copyto[f] in addedset
2422 if (f in copyto and copyto[f] in addedset
2398 and copy[copyto[f]] == f):
2423 and copy[copyto[f]] == f):
2399 continue
2424 continue
2400 yield f1, f2, copyop
2425 yield f1, f2, copyop
2401
2426
2402 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2427 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2403 copy, getfilectx, opts, losedatafn, prefix, relroot):
2428 copy, getfilectx, opts, losedatafn, prefix, relroot):
2404 '''given input data, generate a diff and yield it in blocks
2429 '''given input data, generate a diff and yield it in blocks
2405
2430
2406 If generating a diff would lose data like flags or binary data and
2431 If generating a diff would lose data like flags or binary data and
2407 losedatafn is not None, it will be called.
2432 losedatafn is not None, it will be called.
2408
2433
2409 relroot is removed and prefix is added to every path in the diff output.
2434 relroot is removed and prefix is added to every path in the diff output.
2410
2435
2411 If relroot is not empty, this function expects every path in modified,
2436 If relroot is not empty, this function expects every path in modified,
2412 added, removed and copy to start with it.'''
2437 added, removed and copy to start with it.'''
2413
2438
2414 def gitindex(text):
2439 def gitindex(text):
2415 if not text:
2440 if not text:
2416 text = ""
2441 text = ""
2417 l = len(text)
2442 l = len(text)
2418 s = hashlib.sha1('blob %d\0' % l)
2443 s = hashlib.sha1('blob %d\0' % l)
2419 s.update(text)
2444 s.update(text)
2420 return s.hexdigest()
2445 return s.hexdigest()
2421
2446
2422 if opts.noprefix:
2447 if opts.noprefix:
2423 aprefix = bprefix = ''
2448 aprefix = bprefix = ''
2424 else:
2449 else:
2425 aprefix = 'a/'
2450 aprefix = 'a/'
2426 bprefix = 'b/'
2451 bprefix = 'b/'
2427
2452
2428 def diffline(f, revs):
2453 def diffline(f, revs):
2429 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2454 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2430 return 'diff %s %s' % (revinfo, f)
2455 return 'diff %s %s' % (revinfo, f)
2431
2456
2432 date1 = util.datestr(ctx1.date())
2457 date1 = util.datestr(ctx1.date())
2433 date2 = util.datestr(ctx2.date())
2458 date2 = util.datestr(ctx2.date())
2434
2459
2435 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2460 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2436
2461
2437 if relroot != '' and (repo.ui.configbool('devel', 'all')
2462 if relroot != '' and (repo.ui.configbool('devel', 'all')
2438 or repo.ui.configbool('devel', 'check-relroot')):
2463 or repo.ui.configbool('devel', 'check-relroot')):
2439 for f in modified + added + removed + copy.keys() + copy.values():
2464 for f in modified + added + removed + copy.keys() + copy.values():
2440 if f is not None and not f.startswith(relroot):
2465 if f is not None and not f.startswith(relroot):
2441 raise AssertionError(
2466 raise AssertionError(
2442 "file %s doesn't start with relroot %s" % (f, relroot))
2467 "file %s doesn't start with relroot %s" % (f, relroot))
2443
2468
2444 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2469 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2445 content1 = None
2470 content1 = None
2446 content2 = None
2471 content2 = None
2447 flag1 = None
2472 flag1 = None
2448 flag2 = None
2473 flag2 = None
2449 if f1:
2474 if f1:
2450 content1 = getfilectx(f1, ctx1).data()
2475 content1 = getfilectx(f1, ctx1).data()
2451 if opts.git or losedatafn:
2476 if opts.git or losedatafn:
2452 flag1 = ctx1.flags(f1)
2477 flag1 = ctx1.flags(f1)
2453 if f2:
2478 if f2:
2454 content2 = getfilectx(f2, ctx2).data()
2479 content2 = getfilectx(f2, ctx2).data()
2455 if opts.git or losedatafn:
2480 if opts.git or losedatafn:
2456 flag2 = ctx2.flags(f2)
2481 flag2 = ctx2.flags(f2)
2457 binary = False
2482 binary = False
2458 if opts.git or losedatafn:
2483 if opts.git or losedatafn:
2459 binary = util.binary(content1) or util.binary(content2)
2484 binary = util.binary(content1) or util.binary(content2)
2460
2485
2461 if losedatafn and not opts.git:
2486 if losedatafn and not opts.git:
2462 if (binary or
2487 if (binary or
2463 # copy/rename
2488 # copy/rename
2464 f2 in copy or
2489 f2 in copy or
2465 # empty file creation
2490 # empty file creation
2466 (not f1 and not content2) or
2491 (not f1 and not content2) or
2467 # empty file deletion
2492 # empty file deletion
2468 (not content1 and not f2) or
2493 (not content1 and not f2) or
2469 # create with flags
2494 # create with flags
2470 (not f1 and flag2) or
2495 (not f1 and flag2) or
2471 # change flags
2496 # change flags
2472 (f1 and f2 and flag1 != flag2)):
2497 (f1 and f2 and flag1 != flag2)):
2473 losedatafn(f2 or f1)
2498 losedatafn(f2 or f1)
2474
2499
2475 path1 = f1 or f2
2500 path1 = f1 or f2
2476 path2 = f2 or f1
2501 path2 = f2 or f1
2477 path1 = posixpath.join(prefix, path1[len(relroot):])
2502 path1 = posixpath.join(prefix, path1[len(relroot):])
2478 path2 = posixpath.join(prefix, path2[len(relroot):])
2503 path2 = posixpath.join(prefix, path2[len(relroot):])
2479 header = []
2504 header = []
2480 if opts.git:
2505 if opts.git:
2481 header.append('diff --git %s%s %s%s' %
2506 header.append('diff --git %s%s %s%s' %
2482 (aprefix, path1, bprefix, path2))
2507 (aprefix, path1, bprefix, path2))
2483 if not f1: # added
2508 if not f1: # added
2484 header.append('new file mode %s' % gitmode[flag2])
2509 header.append('new file mode %s' % gitmode[flag2])
2485 elif not f2: # removed
2510 elif not f2: # removed
2486 header.append('deleted file mode %s' % gitmode[flag1])
2511 header.append('deleted file mode %s' % gitmode[flag1])
2487 else: # modified/copied/renamed
2512 else: # modified/copied/renamed
2488 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2513 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2489 if mode1 != mode2:
2514 if mode1 != mode2:
2490 header.append('old mode %s' % mode1)
2515 header.append('old mode %s' % mode1)
2491 header.append('new mode %s' % mode2)
2516 header.append('new mode %s' % mode2)
2492 if copyop is not None:
2517 if copyop is not None:
2493 header.append('%s from %s' % (copyop, path1))
2518 header.append('%s from %s' % (copyop, path1))
2494 header.append('%s to %s' % (copyop, path2))
2519 header.append('%s to %s' % (copyop, path2))
2495 elif revs and not repo.ui.quiet:
2520 elif revs and not repo.ui.quiet:
2496 header.append(diffline(path1, revs))
2521 header.append(diffline(path1, revs))
2497
2522
2498 if binary and opts.git and not opts.nobinary:
2523 if binary and opts.git and not opts.nobinary:
2499 text = mdiff.b85diff(content1, content2)
2524 text = mdiff.b85diff(content1, content2)
2500 if text:
2525 if text:
2501 header.append('index %s..%s' %
2526 header.append('index %s..%s' %
2502 (gitindex(content1), gitindex(content2)))
2527 (gitindex(content1), gitindex(content2)))
2503 else:
2528 else:
2504 text = mdiff.unidiff(content1, date1,
2529 text = mdiff.unidiff(content1, date1,
2505 content2, date2,
2530 content2, date2,
2506 path1, path2, opts=opts)
2531 path1, path2, opts=opts)
2507 if header and (text or len(header) > 1):
2532 if header and (text or len(header) > 1):
2508 yield '\n'.join(header) + '\n'
2533 yield '\n'.join(header) + '\n'
2509 if text:
2534 if text:
2510 yield text
2535 yield text
2511
2536
2512 def diffstatsum(stats):
2537 def diffstatsum(stats):
2513 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2538 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2514 for f, a, r, b in stats:
2539 for f, a, r, b in stats:
2515 maxfile = max(maxfile, encoding.colwidth(f))
2540 maxfile = max(maxfile, encoding.colwidth(f))
2516 maxtotal = max(maxtotal, a + r)
2541 maxtotal = max(maxtotal, a + r)
2517 addtotal += a
2542 addtotal += a
2518 removetotal += r
2543 removetotal += r
2519 binary = binary or b
2544 binary = binary or b
2520
2545
2521 return maxfile, maxtotal, addtotal, removetotal, binary
2546 return maxfile, maxtotal, addtotal, removetotal, binary
2522
2547
2523 def diffstatdata(lines):
2548 def diffstatdata(lines):
2524 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2549 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2525
2550
2526 results = []
2551 results = []
2527 filename, adds, removes, isbinary = None, 0, 0, False
2552 filename, adds, removes, isbinary = None, 0, 0, False
2528
2553
2529 def addresult():
2554 def addresult():
2530 if filename:
2555 if filename:
2531 results.append((filename, adds, removes, isbinary))
2556 results.append((filename, adds, removes, isbinary))
2532
2557
2533 for line in lines:
2558 for line in lines:
2534 if line.startswith('diff'):
2559 if line.startswith('diff'):
2535 addresult()
2560 addresult()
2536 # set numbers to 0 anyway when starting new file
2561 # set numbers to 0 anyway when starting new file
2537 adds, removes, isbinary = 0, 0, False
2562 adds, removes, isbinary = 0, 0, False
2538 if line.startswith('diff --git a/'):
2563 if line.startswith('diff --git a/'):
2539 filename = gitre.search(line).group(2)
2564 filename = gitre.search(line).group(2)
2540 elif line.startswith('diff -r'):
2565 elif line.startswith('diff -r'):
2541 # format: "diff -r ... -r ... filename"
2566 # format: "diff -r ... -r ... filename"
2542 filename = diffre.search(line).group(1)
2567 filename = diffre.search(line).group(1)
2543 elif line.startswith('+') and not line.startswith('+++ '):
2568 elif line.startswith('+') and not line.startswith('+++ '):
2544 adds += 1
2569 adds += 1
2545 elif line.startswith('-') and not line.startswith('--- '):
2570 elif line.startswith('-') and not line.startswith('--- '):
2546 removes += 1
2571 removes += 1
2547 elif (line.startswith('GIT binary patch') or
2572 elif (line.startswith('GIT binary patch') or
2548 line.startswith('Binary file')):
2573 line.startswith('Binary file')):
2549 isbinary = True
2574 isbinary = True
2550 addresult()
2575 addresult()
2551 return results
2576 return results
2552
2577
2553 def diffstat(lines, width=80):
2578 def diffstat(lines, width=80):
2554 output = []
2579 output = []
2555 stats = diffstatdata(lines)
2580 stats = diffstatdata(lines)
2556 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2581 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2557
2582
2558 countwidth = len(str(maxtotal))
2583 countwidth = len(str(maxtotal))
2559 if hasbinary and countwidth < 3:
2584 if hasbinary and countwidth < 3:
2560 countwidth = 3
2585 countwidth = 3
2561 graphwidth = width - countwidth - maxname - 6
2586 graphwidth = width - countwidth - maxname - 6
2562 if graphwidth < 10:
2587 if graphwidth < 10:
2563 graphwidth = 10
2588 graphwidth = 10
2564
2589
2565 def scale(i):
2590 def scale(i):
2566 if maxtotal <= graphwidth:
2591 if maxtotal <= graphwidth:
2567 return i
2592 return i
2568 # If diffstat runs out of room it doesn't print anything,
2593 # If diffstat runs out of room it doesn't print anything,
2569 # which isn't very useful, so always print at least one + or -
2594 # which isn't very useful, so always print at least one + or -
2570 # if there were at least some changes.
2595 # if there were at least some changes.
2571 return max(i * graphwidth // maxtotal, int(bool(i)))
2596 return max(i * graphwidth // maxtotal, int(bool(i)))
2572
2597
2573 for filename, adds, removes, isbinary in stats:
2598 for filename, adds, removes, isbinary in stats:
2574 if isbinary:
2599 if isbinary:
2575 count = 'Bin'
2600 count = 'Bin'
2576 else:
2601 else:
2577 count = adds + removes
2602 count = adds + removes
2578 pluses = '+' * scale(adds)
2603 pluses = '+' * scale(adds)
2579 minuses = '-' * scale(removes)
2604 minuses = '-' * scale(removes)
2580 output.append(' %s%s | %*s %s%s\n' %
2605 output.append(' %s%s | %*s %s%s\n' %
2581 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2606 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2582 countwidth, count, pluses, minuses))
2607 countwidth, count, pluses, minuses))
2583
2608
2584 if stats:
2609 if stats:
2585 output.append(_(' %d files changed, %d insertions(+), '
2610 output.append(_(' %d files changed, %d insertions(+), '
2586 '%d deletions(-)\n')
2611 '%d deletions(-)\n')
2587 % (len(stats), totaladds, totalremoves))
2612 % (len(stats), totaladds, totalremoves))
2588
2613
2589 return ''.join(output)
2614 return ''.join(output)
2590
2615
2591 def diffstatui(*args, **kw):
2616 def diffstatui(*args, **kw):
2592 '''like diffstat(), but yields 2-tuples of (output, label) for
2617 '''like diffstat(), but yields 2-tuples of (output, label) for
2593 ui.write()
2618 ui.write()
2594 '''
2619 '''
2595
2620
2596 for line in diffstat(*args, **kw).splitlines():
2621 for line in diffstat(*args, **kw).splitlines():
2597 if line and line[-1] in '+-':
2622 if line and line[-1] in '+-':
2598 name, graph = line.rsplit(' ', 1)
2623 name, graph = line.rsplit(' ', 1)
2599 yield (name + ' ', '')
2624 yield (name + ' ', '')
2600 m = re.search(r'\++', graph)
2625 m = re.search(r'\++', graph)
2601 if m:
2626 if m:
2602 yield (m.group(0), 'diffstat.inserted')
2627 yield (m.group(0), 'diffstat.inserted')
2603 m = re.search(r'-+', graph)
2628 m = re.search(r'-+', graph)
2604 if m:
2629 if m:
2605 yield (m.group(0), 'diffstat.deleted')
2630 yield (m.group(0), 'diffstat.deleted')
2606 else:
2631 else:
2607 yield (line, '')
2632 yield (line, '')
2608 yield ('\n', '')
2633 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now