##// END OF EJS Templates
diff: slice over bytes to make sure conditions work normally...
Pulkit Goyal -
r31715:6c80f985 default
parent child Browse files
Show More
@@ -1,459 +1,459
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 base85,
16 base85,
17 bdiff,
17 bdiff,
18 error,
18 error,
19 mpatch,
19 mpatch,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 )
22 )
23
23
def splitnewlines(text):
    '''like str.splitlines, but only split on newlines.'''
    # Re-attach the '\n' that split() strips from every piece.
    lines = ['%s\n' % piece for piece in text.split('\n')]
    if not lines:
        return lines
    # split() always leaves one trailing piece after the last newline:
    # drop it when the text ended with '\n', otherwise strip the '\n'
    # we wrongly appended to the final, unterminated line.
    if lines[-1] == '\n':
        del lines[-1]
    else:
        lines[-1] = lines[-1][:-1]
    return lines
33
33
class diffopts(object):
    '''context is the number of context lines
    text treats all files as text
    showfunc enables diff -p output
    git enables the git extended patch format
    nodates removes dates from diff headers
    nobinary ignores binary files
    noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
    ignorews ignores all whitespace changes in the diff
    ignorewsamount ignores changes in the amount of whitespace
    ignoreblanklines ignores changes whose lines are all blank
    upgrade generates git diffs to avoid data loss
    '''

    # One default per supported option; __init__ copies each of these
    # onto the instance unless the caller supplied a non-None override.
    defaults = {
        'context': 3,
        'text': False,
        'showfunc': False,
        'git': False,
        'nodates': False,
        'nobinary': False,
        'noprefix': False,
        'index': 0,
        'ignorews': False,
        'ignorewsamount': False,
        'ignoreblanklines': False,
        'upgrade': False,
        'showsimilarity': False,
    }

    def __init__(self, **opts):
        # Normalize keyword names through the py2/py3 compatibility shim
        # so they match the keys of the defaults table.
        opts = pycompat.byteskwargs(opts)
        for k in self.defaults.keys():
            v = opts.get(k)
            if v is None:
                # None means "not specified", fall back to the default.
                v = self.defaults[k]
            setattr(self, k, v)

        try:
            # 'context' may arrive as a string (e.g. from a command-line
            # option); coerce it here so users get one clear error.
            self.context = int(self.context)
        except ValueError:
            raise error.Abort(_('diff context lines count must be '
                                'an integer, not %r') % self.context)

    def copy(self, **kwargs):
        # Build a new diffopts carrying the current values, selectively
        # overridden by kwargs. The original instance is not mutated.
        opts = dict((k, getattr(self, k)) for k in self.defaults)
        opts.update(kwargs)
        return diffopts(**opts)
82
82
# Shared instance with all options at their default values; used as the
# default `opts` argument throughout this module.
defaultopts = diffopts()
84
84
def wsclean(opts, text, blank=True):
    """Normalize whitespace in `text` according to the diff options.

    `ignorews` strips all whitespace, `ignorewsamount` collapses runs of
    whitespace; when `blank` is true and `ignoreblanklines` is set, runs
    of newlines are collapsed and leading/trailing newlines removed.
    """
    # Pick the bdiff.fixws mode: 1 removes all whitespace, 0 collapses
    # whitespace runs. ignorews wins when both options are set.
    mode = None
    if opts.ignorews:
        mode = 1
    elif opts.ignorewsamount:
        mode = 0
    if mode is not None:
        text = bdiff.fixws(text, mode)
    if blank and opts.ignoreblanklines:
        text = re.sub('\n+', '\n', text).strip('\n')
    return text
93
93
def splitblock(base1, lines1, base2, lines2, opts):
    # The input lines matches except for interwoven blank lines. We
    # transform it into a sequence of matching blocks and blank blocks.
    # Map each line to 1 if it still has content after whitespace
    # cleanup (per opts), or 0 if it is effectively blank.
    lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
    lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
    # s1/s2 are the cursors into each side, e1/e2 the end sentinels.
    s1, e1 = 0, len(lines1)
    s2, e2 = 0, len(lines2)
    while s1 < e1 or s2 < e2:
        i1, i2, btype = s1, s2, '='
        if (i1 >= e1 or lines1[i1] == 0
            or i2 >= e2 or lines2[i2] == 0):
            # Consume the block of blank lines
            btype = '~'
            while i1 < e1 and lines1[i1] == 0:
                i1 += 1
            while i2 < e2 and lines2[i2] == 0:
                i2 += 1
        else:
            # Consume the matching lines
            while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
                i1 += 1
                i2 += 1
        # Yield absolute line ranges (offset by base1/base2) plus the
        # block type: '=' for matching content, '~' for blank-only.
        yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
        s1 = i1
        s2 = i2
119
119
def blocksinrange(blocks, rangeb):
    """filter `blocks` like (a1, a2, b1, b2) from items outside line range
    `rangeb` from ``(b1, b2)`` point of view.

    Return `filteredblocks, rangea` where:

    * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
      `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
      block ``(b1, b2)`` being inside `rangeb` if
      ``rangeb[0] < b2 and b1 < rangeb[1]``;
    * `rangea` is the line range w.r.t. to ``(a1, a2)`` parts of `blocks`.

    Raises error.Abort when no block overlaps `rangeb` (i.e. the range
    lies beyond the end of the file).
    """
    lbb, ubb = rangeb
    # Lower/upper bound of the corresponding "a" range, computed below.
    lba, uba = None, None
    filteredblocks = []
    for block in blocks:
        (a1, a2, b1, b2), stype = block
        if lbb >= b1 and ubb <= b2 and stype == '=':
            # rangeb is within a single "=" hunk, restrict back linerange1
            # by offsetting rangeb
            lba = lbb - b1 + a1
            uba = ubb - b1 + a1
        else:
            # rangeb spans several blocks: derive each bound from the
            # block that contains it.
            if b1 <= lbb < b2:
                if stype == '=':
                    lba = a2 - (b2 - lbb)
                else:
                    # Non-matching block: the whole "a" side may be
                    # involved, start at its beginning.
                    lba = a1
            if b1 < ubb <= b2:
                if stype == '=':
                    uba = a1 + (ubb - b1)
                else:
                    uba = a2
        # Keep any block overlapping rangeb at all.
        if lbb < b2 and b1 < ubb:
            filteredblocks.append(block)
    if lba is None or uba is None or uba < lba:
        raise error.Abort(_('line range exceeds file size'))
    return filteredblocks, (lba, uba)
158
158
def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
    """Return (block, type) tuples, where block is an mdiff.blocks
    line entry. type is '=' for blocks matching exactly one another
    (bdiff blocks), '!' for non-matching blocks and '~' for blocks
    matching only after having filtered blank lines.
    line1 and line2 are text1 and text2 split with splitnewlines() if
    they are already available.
    """
    if opts is None:
        opts = defaultopts
    if opts.ignorews or opts.ignorewsamount:
        # Normalize whitespace up front so bdiff compares cleaned text.
        text1 = wsclean(opts, text1, False)
        text2 = wsclean(opts, text2, False)
    diff = bdiff.blocks(text1, text2)
    for i, s1 in enumerate(diff):
        # The first match is special.
        # we've either found a match starting at line 0 or a match later
        # in the file. If it starts later, old and new below will both be
        # empty and we'll continue to the next match.
        if i > 0:
            s = diff[i - 1]
        else:
            s = [0, 0, 0, 0]
        # s becomes the non-matching gap between the previous match and
        # this one: (end of prev a, start of this a, end of prev b,
        # start of this b).
        s = [s[1], s1[0], s[3], s1[2]]

        # bdiff sometimes gives huge matches past eof, this check eats them,
        # and deals with the special first match case described above
        if s[0] != s[1] or s[2] != s[3]:
            # 'btype' (was 'type', renamed to avoid shadowing the
            # builtin) classifies the gap: '!' changed, '~' blank-only.
            btype = '!'
            if opts.ignoreblanklines:
                if lines1 is None:
                    lines1 = splitnewlines(text1)
                if lines2 is None:
                    lines2 = splitnewlines(text2)
                # If both sides are equal once blank lines are stripped,
                # downgrade the block from '!' to '~'.
                old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
                new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
                if old == new:
                    btype = '~'
            yield s, btype
        yield s1, '='
199
199
def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
    """Return a unified diff as a (headers, hunks) tuple.

    If the diff is not null, `headers` is a list with unified diff header
    lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
    (hunkrange, hunklines) coming from _unidiff().
    Otherwise, `headers` and `hunks` are empty.

    `a`/`b` are the old/new file contents (None means the file is
    absent), `ad`/`bd` their dates, `fn1`/`fn2` their names.
    """
    def datetag(date, fn=None):
        # Dates are suppressed for git patches and when nodates is set;
        # a lone tab is still emitted for names containing spaces so
        # parsers can find the end of the filename.
        if not opts.git and not opts.nodates:
            return '\t%s' % date
        if fn and ' ' in fn:
            return '\t'
        return ''

    # Returned as-is when the two sides are identical (null diff).
    sentinel = [], ()
    if not a and not b:
        return sentinel

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    # Date stamp used for /dev/null sides (file added or removed).
    epoch = util.datestr((0, 0))

    # Normalize path separators for display.
    fn1 = util.pconvert(fn1)
    fn2 = util.pconvert(fn2)

    def checknonewline(lines):
        for text in lines:
            # Slice rather than index so the comparison behaves the same
            # for str and bytes (indexing bytes yields an int on py3).
            if text[-1:] != '\n':
                text += "\n\ No newline at end of file\n"
            yield text

    if not opts.text and (util.binary(a) or util.binary(b)):
        # Binary file handling: no hunks, just a one-line notice.
        if a and b and len(a) == len(b) and a == b:
            return sentinel
        headerlines = []
        hunks = (None, ['Binary file %s has changed\n' % fn1]),
    elif not a:
        # File added (or previously empty): one all-additions hunk.
        b = splitnewlines(b)
        if a is None:
            l1 = '--- /dev/null%s' % datetag(epoch)
        else:
            l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
        headerlines = [l1, l2]
        size = len(b)
        hunkrange = (0, 0, 1, size)
        hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
        hunks = (hunkrange, checknonewline(hunklines)),
    elif not b:
        # File removed (or now empty): one all-deletions hunk.
        a = splitnewlines(a)
        l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
        if b is None:
            l2 = '+++ /dev/null%s' % datetag(epoch)
        else:
            l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
        headerlines = [l1, l2]
        size = len(a)
        hunkrange = (1, size, 0, 0)
        hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
        hunks = (hunkrange, checknonewline(hunklines)),
    else:
        # Regular change: delegate hunk computation to _unidiff(). Pull
        # the first hunk eagerly so a null diff can return sentinel.
        diffhunks = _unidiff(a, b, opts=opts)
        try:
            hunkrange, hunklines = next(diffhunks)
        except StopIteration:
            return sentinel

        headerlines = [
            "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
            "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
        ]
        def rewindhunks():
            # Re-chain the hunk consumed above in front of the rest.
            yield hunkrange, checknonewline(hunklines)
            for hr, hl in diffhunks:
                yield hr, checknonewline(hl)

        hunks = rewindhunks()

    return headerlines, hunks
284
284
def _unidiff(t1, t2, opts=defaultopts):
    """Yield hunks of a headerless unified diff from t1 and t2 texts.

    Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
    tuple (s1, l1, s2, l2) representing the range information of the hunk to
    form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
    of the hunk combining said header followed by line additions and
    deletions.
    """
    l1 = splitnewlines(t1)
    l2 = splitnewlines(t2)
    # Clamp a context-extended end to the file length. NOTE: the second
    # parameter shadows the builtin len(); kept as-is.
    def contextend(l, len):
        ret = l + opts.context
        if ret > len:
            ret = len
        return ret

    # Clamp a context-extended start to the beginning of the file.
    def contextstart(l):
        ret = l - opts.context
        if ret < 0:
            return 0
        return ret

    # [position of last function-line scan, last function name found];
    # mutable so yieldhunk() can update it across calls.
    lastfunc = [0, '']
    def yieldhunk(hunk):
        # hunk = [astart, a2, bstart, b2, delta] where delta already
        # holds the leading context plus '-'/'+' lines.
        (astart, a2, bstart, b2, delta) = hunk
        aend = contextend(a2, len(l1))
        alen = aend - astart
        blen = b2 - bstart + aend - a2

        func = ""
        if opts.showfunc:
            lastpos, func = lastfunc
            # walk backwards from the start of the context up to the start of
            # the previous hunk context until we find a line starting with an
            # alphanumeric char.
            for i in xrange(astart - 1, lastpos - 1, -1):
                if l1[i][0].isalnum():
                    func = ' ' + l1[i].rstrip()[:40]
                    lastfunc[1] = func
                    break
            # by recording this hunk's starting point as the next place to
            # start looking for function lines, we avoid reading any line in
            # the file more than once.
            lastfunc[0] = astart

        # zero-length hunk ranges report their start line as one less
        if alen:
            astart += 1
        if blen:
            bstart += 1

        hunkrange = astart, alen, bstart, blen
        # Header line, then the accumulated delta, then trailing context.
        hunklines = (
            ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
            + delta
            + [' ' + l1[x] for x in xrange(a2, aend)]
        )
        yield hunkrange, hunklines

    # bdiff.blocks gives us the matching sequences in the files. The loop
    # below finds the spaces between those matching sequences and translates
    # them into diff output.
    #
    hunk = None
    ignoredlines = 0
    for s, stype in allblocks(t1, t2, opts, l1, l2):
        a1, a2, b1, b2 = s
        if stype != '!':
            if stype == '~':
                # The diff context lines are based on t1 content. When
                # blank lines are ignored, the new lines offsets must
                # be adjusted as if equivalent blocks ('~') had the
                # same sizes on both sides.
                ignoredlines += (b2 - b1) - (a2 - a1)
            continue
        delta = []
        old = l1[a1:a2]
        new = l2[b1:b2]

        b1 -= ignoredlines
        b2 -= ignoredlines
        astart = contextstart(a1)
        bstart = contextstart(b1)
        prev = None
        if hunk:
            # join with the previous hunk if it falls inside the context
            if astart < hunk[1] + opts.context + 1:
                prev = hunk
                astart = hunk[1]
                bstart = hunk[3]
            else:
                # Previous hunk is far enough away: emit it now.
                for x in yieldhunk(hunk):
                    yield x
        if prev:
            # we've joined the previous hunk, record the new ending points.
            hunk[1] = a2
            hunk[3] = b2
            delta = hunk[4]
        else:
            # create a new hunk
            hunk = [astart, a2, bstart, b2, delta]

        # Append leading context, then the removed and added lines.
        delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
        delta[len(delta):] = ['-' + x for x in old]
        delta[len(delta):] = ['+' + x for x in new]

    # Flush the final pending hunk, if any.
    if hunk:
        for x in yieldhunk(hunk):
            yield x
395
395
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def fmtline(line):
        # Git's binary-patch length prefix: 'A'-'Z' encode lengths
        # 1..26, 'a'-'z' encode 27..52.
        n = len(line)
        if n <= 26:
            prefix = chr(ord('A') + n - 1)
        else:
            prefix = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (prefix, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # Yield consecutive csize-byte slices of text.
        for start in range(0, len(text), csize):
            yield text[start:start + csize]

    if to is None:
        to = ''
    if tn is None:
        tn = ''

    # Identical contents produce no output at all.
    if to == tn:
        return ''

    # TODO: deltas
    header = ['GIT binary patch\n', 'literal %s\n' % len(tn)]
    body = [fmtline(piece) for piece in chunk(zlib.compress(tn))]
    return ''.join(header + body + ['\n'])
430
430
def patchtext(bin):
    """Concatenate the data payloads of every (start, end, length) hunk
    in a binary patch blob."""
    offset = 0
    end = len(bin)
    pieces = []
    while offset < end:
        # Each hunk is a 12-byte big-endian header (p1, p2, length)
        # followed by `l` bytes of replacement data.
        p1, p2, l = struct.unpack(">lll", bin[offset:offset + 12])
        offset += 12
        pieces.append(bin[offset:offset + l])
        offset += l
    return "".join(pieces)
440
440
def patch(a, bin):
    """Apply binary patch `bin` to text `a` and return the result.

    An empty base means `bin` is the full new text preceded by a
    trivial 12-byte header, so mpatch can be bypassed entirely.
    """
    # skip over trivial delta header when there is nothing to patch
    return util.buffer(bin, 12) if len(a) == 0 else mpatch.patches(a, [bin])
446
446
# similar to difflib.SequenceMatcher.get_matching_blocks
def get_matching_blocks(a, b):
    # Convert bdiff's (a1, a2, b1, b2) blocks into difflib-style
    # (a-start, b-start, length) triples.
    return [(a1, b1, a2 - a1) for (a1, a2, b1, b2) in bdiff.blocks(a, b)]
450
450
def trivialdiffheader(length):
    """Return the binary header of a delta that replaces an empty file
    with `length` bytes, or the empty string for a zero-length delta."""
    if not length:
        return ''
    return struct.pack(">lll", 0, 0, length)
453
453
def replacediffheader(oldlen, newlen):
    """Return the binary header of a delta that replaces the first
    `oldlen` bytes of a text with `newlen` new bytes."""
    header = struct.pack(">lll", 0, oldlen, newlen)
    return header
456
456
# Convenience aliases re-exporting the underlying patch/diff primitives
# under mdiff's public names.
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
@@ -1,2673 +1,2673
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
stringio = util.stringio

# Matches a git extended diff header line and captures both file names.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tab characters.
tabsplitter = re.compile(br'(\t+|[^\t]+)')
47
47
class PatchError(Exception):
    """Raised for errors encountered while parsing or applying patches."""
    pass
50
50
51
51
52 # public functions
52 # public functions
53
53
54 def split(stream):
54 def split(stream):
55 '''return an iterator of individual patches from a stream'''
55 '''return an iterator of individual patches from a stream'''
56 def isheader(line, inheader):
56 def isheader(line, inheader):
57 if inheader and line[0] in (' ', '\t'):
57 if inheader and line[0] in (' ', '\t'):
58 # continuation
58 # continuation
59 return True
59 return True
60 if line[0] in (' ', '-', '+'):
60 if line[0] in (' ', '-', '+'):
61 # diff line - don't check for header pattern in there
61 # diff line - don't check for header pattern in there
62 return False
62 return False
63 l = line.split(': ', 1)
63 l = line.split(': ', 1)
64 return len(l) == 2 and ' ' not in l[0]
64 return len(l) == 2 and ' ' not in l[0]
65
65
66 def chunk(lines):
66 def chunk(lines):
67 return stringio(''.join(lines))
67 return stringio(''.join(lines))
68
68
69 def hgsplit(stream, cur):
69 def hgsplit(stream, cur):
70 inheader = True
70 inheader = True
71
71
72 for line in stream:
72 for line in stream:
73 if not line.strip():
73 if not line.strip():
74 inheader = False
74 inheader = False
75 if not inheader and line.startswith('# HG changeset patch'):
75 if not inheader and line.startswith('# HG changeset patch'):
76 yield chunk(cur)
76 yield chunk(cur)
77 cur = []
77 cur = []
78 inheader = True
78 inheader = True
79
79
80 cur.append(line)
80 cur.append(line)
81
81
82 if cur:
82 if cur:
83 yield chunk(cur)
83 yield chunk(cur)
84
84
85 def mboxsplit(stream, cur):
85 def mboxsplit(stream, cur):
86 for line in stream:
86 for line in stream:
87 if line.startswith('From '):
87 if line.startswith('From '):
88 for c in split(chunk(cur[1:])):
88 for c in split(chunk(cur[1:])):
89 yield c
89 yield c
90 cur = []
90 cur = []
91
91
92 cur.append(line)
92 cur.append(line)
93
93
94 if cur:
94 if cur:
95 for c in split(chunk(cur[1:])):
95 for c in split(chunk(cur[1:])):
96 yield c
96 yield c
97
97
98 def mimesplit(stream, cur):
98 def mimesplit(stream, cur):
99 def msgfp(m):
99 def msgfp(m):
100 fp = stringio()
100 fp = stringio()
101 g = email.Generator.Generator(fp, mangle_from_=False)
101 g = email.Generator.Generator(fp, mangle_from_=False)
102 g.flatten(m)
102 g.flatten(m)
103 fp.seek(0)
103 fp.seek(0)
104 return fp
104 return fp
105
105
106 for line in stream:
106 for line in stream:
107 cur.append(line)
107 cur.append(line)
108 c = chunk(cur)
108 c = chunk(cur)
109
109
110 m = email.Parser.Parser().parse(c)
110 m = email.Parser.Parser().parse(c)
111 if not m.is_multipart():
111 if not m.is_multipart():
112 yield msgfp(m)
112 yield msgfp(m)
113 else:
113 else:
114 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
115 for part in m.walk():
115 for part in m.walk():
116 ct = part.get_content_type()
116 ct = part.get_content_type()
117 if ct not in ok_types:
117 if ct not in ok_types:
118 continue
118 continue
119 yield msgfp(part)
119 yield msgfp(part)
120
120
121 def headersplit(stream, cur):
121 def headersplit(stream, cur):
122 inheader = False
122 inheader = False
123
123
124 for line in stream:
124 for line in stream:
125 if not inheader and isheader(line, inheader):
125 if not inheader and isheader(line, inheader):
126 yield chunk(cur)
126 yield chunk(cur)
127 cur = []
127 cur = []
128 inheader = True
128 inheader = True
129 if inheader and not isheader(line, inheader):
129 if inheader and not isheader(line, inheader):
130 inheader = False
130 inheader = False
131
131
132 cur.append(line)
132 cur.append(line)
133
133
134 if cur:
134 if cur:
135 yield chunk(cur)
135 yield chunk(cur)
136
136
137 def remainder(cur):
137 def remainder(cur):
138 yield chunk(cur)
138 yield chunk(cur)
139
139
140 class fiter(object):
140 class fiter(object):
141 def __init__(self, fp):
141 def __init__(self, fp):
142 self.fp = fp
142 self.fp = fp
143
143
144 def __iter__(self):
144 def __iter__(self):
145 return self
145 return self
146
146
147 def next(self):
147 def next(self):
148 l = self.fp.readline()
148 l = self.fp.readline()
149 if not l:
149 if not l:
150 raise StopIteration
150 raise StopIteration
151 return l
151 return l
152
152
153 inheader = False
153 inheader = False
154 cur = []
154 cur = []
155
155
156 mimeheaders = ['content-type']
156 mimeheaders = ['content-type']
157
157
158 if not util.safehasattr(stream, 'next'):
158 if not util.safehasattr(stream, 'next'):
159 # http responses, for example, have readline but not next
159 # http responses, for example, have readline but not next
160 stream = fiter(stream)
160 stream = fiter(stream)
161
161
162 for line in stream:
162 for line in stream:
163 cur.append(line)
163 cur.append(line)
164 if line.startswith('# HG changeset patch'):
164 if line.startswith('# HG changeset patch'):
165 return hgsplit(stream, cur)
165 return hgsplit(stream, cur)
166 elif line.startswith('From '):
166 elif line.startswith('From '):
167 return mboxsplit(stream, cur)
167 return mboxsplit(stream, cur)
168 elif isheader(line, inheader):
168 elif isheader(line, inheader):
169 inheader = True
169 inheader = True
170 if line.split(':', 1)[0].lower() in mimeheaders:
170 if line.split(':', 1)[0].lower() in mimeheaders:
171 # let email parser handle this
171 # let email parser handle this
172 return mimesplit(stream, cur)
172 return mimesplit(stream, cur)
173 elif line.startswith('--- ') and inheader:
173 elif line.startswith('--- ') and inheader:
174 # No evil headers seen by diff start, split by hand
174 # No evil headers seen by diff start, split by hand
175 return headersplit(stream, cur)
175 return headersplit(stream, cur)
176 # Not enough info, keep reading
176 # Not enough info, keep reading
177
177
178 # if we are here, we have a very plain patch
178 # if we are here, we have a very plain patch
179 return remainder(cur)
179 return remainder(cur)
180
180
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
187
187
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip the "[PATCH n/m]" prefix patchbomb-style mails carry
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # unfold continuation lines
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is (candidate) commit
                # message, possibly interleaved with "# ..." hg metadata
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # git-style "---" separator: ignore the rest
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data
307
307
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a unix mode word into the (islink, isexec) pair
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        """Return an independent copy of this metadata object."""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does 'afile' (the "--- a/..." side) name this patch's source?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does 'bfile' (the "+++ b/..." side) name this patch's target?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
353
353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per "diff --git" section
    read from the line reader 'lr'.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file section begins; flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of the hunk data: metadata for this file is done
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
397
397
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []   # pushed-back lines, consumed before fp

    def push(self, line):
        """Push 'line' back; it will be returned by the next readline()."""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty-string sentinel
        return iter(self.readline, '')
417
417
class abstractbackend(object):
    """Interface for objects patches are applied against."""
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # best-effort hook: backends without reject support do nothing
        pass

    def exists(self, fname):
        raise NotImplementedError
449
449
class fsbackend(abstractbackend):
    """Backend applying patches directly to files under basedir."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink target is the "data", flagged (islink, isexec)
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file: signalled by (None, None)
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # flags-only change
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
501
501
class workingbackend(fsbackend):
    """Filesystem backend that also records changes in the dirstate."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files present on disk but unknown to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
545
545
class filestore(object):
    """Store patched file contents, in memory up to maxsize bytes and
    spilling bigger data into a temporary directory on disk."""
    def __init__(self, maxsize=None):
        self.opener = None        # lazily-created on-disk spill area
        self.files = {}           # fname -> (diskname, mode, copied)
        self.created = 0          # counter used to generate disk names
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0             # bytes currently held in memory
        self.data = {}            # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied), or (None, None, None) if unknown."""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the on-disk spill area, if one was created
        if self.opener:
            shutil.rmtree(self.opener.base)
582
582
class repobackend(abstractbackend):
    """Backend patching against a repository context, writing results
    into a filestore instead of the working directory."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
624
624
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# (raw strings: '\d' in a plain literal is an invalid escape sequence)
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']
629
629
630 class patchfile(object):
630 class patchfile(object):
631 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 def __init__(self, ui, gp, backend, store, eolmode='strict'):
632 self.fname = gp.path
632 self.fname = gp.path
633 self.eolmode = eolmode
633 self.eolmode = eolmode
634 self.eol = None
634 self.eol = None
635 self.backend = backend
635 self.backend = backend
636 self.ui = ui
636 self.ui = ui
637 self.lines = []
637 self.lines = []
638 self.exists = False
638 self.exists = False
639 self.missing = True
639 self.missing = True
640 self.mode = gp.mode
640 self.mode = gp.mode
641 self.copysource = gp.oldpath
641 self.copysource = gp.oldpath
642 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
643 self.remove = gp.op == 'DELETE'
643 self.remove = gp.op == 'DELETE'
644 if self.copysource is None:
644 if self.copysource is None:
645 data, mode = backend.getfile(self.fname)
645 data, mode = backend.getfile(self.fname)
646 else:
646 else:
647 data, mode = store.getfile(self.copysource)[:2]
647 data, mode = store.getfile(self.copysource)[:2]
648 if data is not None:
648 if data is not None:
649 self.exists = self.copysource is None or backend.exists(self.fname)
649 self.exists = self.copysource is None or backend.exists(self.fname)
650 self.missing = False
650 self.missing = False
651 if data:
651 if data:
652 self.lines = mdiff.splitnewlines(data)
652 self.lines = mdiff.splitnewlines(data)
653 if self.mode is None:
653 if self.mode is None:
654 self.mode = mode
654 self.mode = mode
655 if self.lines:
655 if self.lines:
656 # Normalize line endings
656 # Normalize line endings
657 if self.lines[0].endswith('\r\n'):
657 if self.lines[0].endswith('\r\n'):
658 self.eol = '\r\n'
658 self.eol = '\r\n'
659 elif self.lines[0].endswith('\n'):
659 elif self.lines[0].endswith('\n'):
660 self.eol = '\n'
660 self.eol = '\n'
661 if eolmode != 'strict':
661 if eolmode != 'strict':
662 nlines = []
662 nlines = []
663 for l in self.lines:
663 for l in self.lines:
664 if l.endswith('\r\n'):
664 if l.endswith('\r\n'):
665 l = l[:-2] + '\n'
665 l = l[:-2] + '\n'
666 nlines.append(l)
666 nlines.append(l)
667 self.lines = nlines
667 self.lines = nlines
668 else:
668 else:
669 if self.create:
669 if self.create:
670 self.missing = False
670 self.missing = False
671 if self.mode is None:
671 if self.mode is None:
672 self.mode = (False, False)
672 self.mode = (False, False)
673 if self.missing:
673 if self.missing:
674 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
675 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
675 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
676 "current directory)\n"))
676 "current directory)\n"))
677
677
678 self.hash = {}
678 self.hash = {}
679 self.dirty = 0
679 self.dirty = 0
680 self.offset = 0
680 self.offset = 0
681 self.skew = 0
681 self.skew = 0
682 self.rej = []
682 self.rej = []
683 self.fileprinted = False
683 self.fileprinted = False
684 self.printfile(False)
684 self.printfile(False)
685 self.hunks = 0
685 self.hunks = 0
686
686
687 def writelines(self, fname, lines, mode):
687 def writelines(self, fname, lines, mode):
688 if self.eolmode == 'auto':
688 if self.eolmode == 'auto':
689 eol = self.eol
689 eol = self.eol
690 elif self.eolmode == 'crlf':
690 elif self.eolmode == 'crlf':
691 eol = '\r\n'
691 eol = '\r\n'
692 else:
692 else:
693 eol = '\n'
693 eol = '\n'
694
694
695 if self.eolmode != 'strict' and eol and eol != '\n':
695 if self.eolmode != 'strict' and eol and eol != '\n':
696 rawlines = []
696 rawlines = []
697 for l in lines:
697 for l in lines:
698 if l and l[-1] == '\n':
698 if l and l[-1] == '\n':
699 l = l[:-1] + eol
699 l = l[:-1] + eol
700 rawlines.append(l)
700 rawlines.append(l)
701 lines = rawlines
701 lines = rawlines
702
702
703 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
703 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704
704
705 def printfile(self, warn):
705 def printfile(self, warn):
706 if self.fileprinted:
706 if self.fileprinted:
707 return
707 return
708 if warn or self.ui.verbose:
708 if warn or self.ui.verbose:
709 self.fileprinted = True
709 self.fileprinted = True
710 s = _("patching file %s\n") % self.fname
710 s = _("patching file %s\n") % self.fname
711 if warn:
711 if warn:
712 self.ui.warn(s)
712 self.ui.warn(s)
713 else:
713 else:
714 self.ui.note(s)
714 self.ui.note(s)
715
715
716
716
717 def findlines(self, l, linenum):
717 def findlines(self, l, linenum):
718 # looks through the hash and finds candidate lines. The
718 # looks through the hash and finds candidate lines. The
719 # result is a list of line numbers sorted based on distance
719 # result is a list of line numbers sorted based on distance
720 # from linenum
720 # from linenum
721
721
722 cand = self.hash.get(l, [])
722 cand = self.hash.get(l, [])
723 if len(cand) > 1:
723 if len(cand) > 1:
724 # resort our list of potentials forward then back.
724 # resort our list of potentials forward then back.
725 cand.sort(key=lambda x: abs(x - linenum))
725 cand.sort(key=lambda x: abs(x - linenum))
726 return cand
726 return cand
727
727
728 def write_rej(self):
728 def write_rej(self):
729 # our rejects are a little different from patch(1). This always
729 # our rejects are a little different from patch(1). This always
730 # creates rejects in the same form as the original patch. A file
730 # creates rejects in the same form as the original patch. A file
731 # header is inserted so that you can run the reject through patch again
731 # header is inserted so that you can run the reject through patch again
732 # without having to type the filename.
732 # without having to type the filename.
733 if not self.rej:
733 if not self.rej:
734 return
734 return
735 base = os.path.basename(self.fname)
735 base = os.path.basename(self.fname)
736 lines = ["--- %s\n+++ %s\n" % (base, base)]
736 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 for x in self.rej:
737 for x in self.rej:
738 for l in x.hunk:
738 for l in x.hunk:
739 lines.append(l)
739 lines.append(l)
740 if l[-1] != '\n':
740 if l[-1:] != '\n':
741 lines.append("\n\ No newline at end of file\n")
741 lines.append("\n\ No newline at end of file\n")
742 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
742 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743
743
744 def apply(self, h):
744 def apply(self, h):
745 if not h.complete():
745 if not h.complete():
746 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
746 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 (h.number, h.desc, len(h.a), h.lena, len(h.b),
747 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 h.lenb))
748 h.lenb))
749
749
750 self.hunks += 1
750 self.hunks += 1
751
751
752 if self.missing:
752 if self.missing:
753 self.rej.append(h)
753 self.rej.append(h)
754 return -1
754 return -1
755
755
756 if self.exists and self.create:
756 if self.exists and self.create:
757 if self.copysource:
757 if self.copysource:
758 self.ui.warn(_("cannot create %s: destination already "
758 self.ui.warn(_("cannot create %s: destination already "
759 "exists\n") % self.fname)
759 "exists\n") % self.fname)
760 else:
760 else:
761 self.ui.warn(_("file %s already exists\n") % self.fname)
761 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.rej.append(h)
762 self.rej.append(h)
763 return -1
763 return -1
764
764
765 if isinstance(h, binhunk):
765 if isinstance(h, binhunk):
766 if self.remove:
766 if self.remove:
767 self.backend.unlink(self.fname)
767 self.backend.unlink(self.fname)
768 else:
768 else:
769 l = h.new(self.lines)
769 l = h.new(self.lines)
770 self.lines[:] = l
770 self.lines[:] = l
771 self.offset += len(l)
771 self.offset += len(l)
772 self.dirty = True
772 self.dirty = True
773 return 0
773 return 0
774
774
775 horig = h
775 horig = h
776 if (self.eolmode in ('crlf', 'lf')
776 if (self.eolmode in ('crlf', 'lf')
777 or self.eolmode == 'auto' and self.eol):
777 or self.eolmode == 'auto' and self.eol):
778 # If new eols are going to be normalized, then normalize
778 # If new eols are going to be normalized, then normalize
779 # hunk data before patching. Otherwise, preserve input
779 # hunk data before patching. Otherwise, preserve input
780 # line-endings.
780 # line-endings.
781 h = h.getnormalized()
781 h = h.getnormalized()
782
782
783 # fast case first, no offsets, no fuzz
783 # fast case first, no offsets, no fuzz
784 old, oldstart, new, newstart = h.fuzzit(0, False)
784 old, oldstart, new, newstart = h.fuzzit(0, False)
785 oldstart += self.offset
785 oldstart += self.offset
786 orig_start = oldstart
786 orig_start = oldstart
787 # if there's skew we want to emit the "(offset %d lines)" even
787 # if there's skew we want to emit the "(offset %d lines)" even
788 # when the hunk cleanly applies at start + skew, so skip the
788 # when the hunk cleanly applies at start + skew, so skip the
789 # fast case code
789 # fast case code
790 if (self.skew == 0 and
790 if (self.skew == 0 and
791 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
791 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 if self.remove:
792 if self.remove:
793 self.backend.unlink(self.fname)
793 self.backend.unlink(self.fname)
794 else:
794 else:
795 self.lines[oldstart:oldstart + len(old)] = new
795 self.lines[oldstart:oldstart + len(old)] = new
796 self.offset += len(new) - len(old)
796 self.offset += len(new) - len(old)
797 self.dirty = True
797 self.dirty = True
798 return 0
798 return 0
799
799
800 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
800 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 self.hash = {}
801 self.hash = {}
802 for x, s in enumerate(self.lines):
802 for x, s in enumerate(self.lines):
803 self.hash.setdefault(s, []).append(x)
803 self.hash.setdefault(s, []).append(x)
804
804
805 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
805 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
806 for toponly in [True, False]:
806 for toponly in [True, False]:
807 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
807 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 oldstart = oldstart + self.offset + self.skew
808 oldstart = oldstart + self.offset + self.skew
809 oldstart = min(oldstart, len(self.lines))
809 oldstart = min(oldstart, len(self.lines))
810 if old:
810 if old:
811 cand = self.findlines(old[0][1:], oldstart)
811 cand = self.findlines(old[0][1:], oldstart)
812 else:
812 else:
813 # Only adding lines with no or fuzzed context, just
813 # Only adding lines with no or fuzzed context, just
814 # take the skew in account
814 # take the skew in account
815 cand = [oldstart]
815 cand = [oldstart]
816
816
817 for l in cand:
817 for l in cand:
818 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
818 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 self.lines[l : l + len(old)] = new
819 self.lines[l : l + len(old)] = new
820 self.offset += len(new) - len(old)
820 self.offset += len(new) - len(old)
821 self.skew = l - orig_start
821 self.skew = l - orig_start
822 self.dirty = True
822 self.dirty = True
823 offset = l - orig_start - fuzzlen
823 offset = l - orig_start - fuzzlen
824 if fuzzlen:
824 if fuzzlen:
825 msg = _("Hunk #%d succeeded at %d "
825 msg = _("Hunk #%d succeeded at %d "
826 "with fuzz %d "
826 "with fuzz %d "
827 "(offset %d lines).\n")
827 "(offset %d lines).\n")
828 self.printfile(True)
828 self.printfile(True)
829 self.ui.warn(msg %
829 self.ui.warn(msg %
830 (h.number, l + 1, fuzzlen, offset))
830 (h.number, l + 1, fuzzlen, offset))
831 else:
831 else:
832 msg = _("Hunk #%d succeeded at %d "
832 msg = _("Hunk #%d succeeded at %d "
833 "(offset %d lines).\n")
833 "(offset %d lines).\n")
834 self.ui.note(msg % (h.number, l + 1, offset))
834 self.ui.note(msg % (h.number, l + 1, offset))
835 return fuzzlen
835 return fuzzlen
836 self.printfile(True)
836 self.printfile(True)
837 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
837 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.rej.append(horig)
838 self.rej.append(horig)
839 return -1
839 return -1
840
840
841 def close(self):
841 def close(self):
842 if self.dirty:
842 if self.dirty:
843 self.writelines(self.fname, self.lines, self.mode)
843 self.writelines(self.fname, self.lines, self.mode)
844 self.write_rej()
844 self.write_rej()
845 return len(self.rej)
845 return len(self.rej)
846
846
847 class header(object):
847 class header(object):
848 """patch header
848 """patch header
849 """
849 """
850 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
850 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diff_re = re.compile('diff -r .* (.*)$')
851 diff_re = re.compile('diff -r .* (.*)$')
852 allhunks_re = re.compile('(?:index|deleted file) ')
852 allhunks_re = re.compile('(?:index|deleted file) ')
853 pretty_re = re.compile('(?:new file|deleted file) ')
853 pretty_re = re.compile('(?:new file|deleted file) ')
854 special_re = re.compile('(?:index|deleted|copy|rename) ')
854 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 newfile_re = re.compile('(?:new file)')
855 newfile_re = re.compile('(?:new file)')
856
856
857 def __init__(self, header):
857 def __init__(self, header):
858 self.header = header
858 self.header = header
859 self.hunks = []
859 self.hunks = []
860
860
861 def binary(self):
861 def binary(self):
862 return any(h.startswith('index ') for h in self.header)
862 return any(h.startswith('index ') for h in self.header)
863
863
864 def pretty(self, fp):
864 def pretty(self, fp):
865 for h in self.header:
865 for h in self.header:
866 if h.startswith('index '):
866 if h.startswith('index '):
867 fp.write(_('this modifies a binary file (all or nothing)\n'))
867 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 break
868 break
869 if self.pretty_re.match(h):
869 if self.pretty_re.match(h):
870 fp.write(h)
870 fp.write(h)
871 if self.binary():
871 if self.binary():
872 fp.write(_('this is a binary file\n'))
872 fp.write(_('this is a binary file\n'))
873 break
873 break
874 if h.startswith('---'):
874 if h.startswith('---'):
875 fp.write(_('%d hunks, %d lines changed\n') %
875 fp.write(_('%d hunks, %d lines changed\n') %
876 (len(self.hunks),
876 (len(self.hunks),
877 sum([max(h.added, h.removed) for h in self.hunks])))
877 sum([max(h.added, h.removed) for h in self.hunks])))
878 break
878 break
879 fp.write(h)
879 fp.write(h)
880
880
881 def write(self, fp):
881 def write(self, fp):
882 fp.write(''.join(self.header))
882 fp.write(''.join(self.header))
883
883
884 def allhunks(self):
884 def allhunks(self):
885 return any(self.allhunks_re.match(h) for h in self.header)
885 return any(self.allhunks_re.match(h) for h in self.header)
886
886
887 def files(self):
887 def files(self):
888 match = self.diffgit_re.match(self.header[0])
888 match = self.diffgit_re.match(self.header[0])
889 if match:
889 if match:
890 fromfile, tofile = match.groups()
890 fromfile, tofile = match.groups()
891 if fromfile == tofile:
891 if fromfile == tofile:
892 return [fromfile]
892 return [fromfile]
893 return [fromfile, tofile]
893 return [fromfile, tofile]
894 else:
894 else:
895 return self.diff_re.match(self.header[0]).groups()
895 return self.diff_re.match(self.header[0]).groups()
896
896
897 def filename(self):
897 def filename(self):
898 return self.files()[-1]
898 return self.files()[-1]
899
899
900 def __repr__(self):
900 def __repr__(self):
901 return '<header %s>' % (' '.join(map(repr, self.files())))
901 return '<header %s>' % (' '.join(map(repr, self.files())))
902
902
903 def isnewfile(self):
903 def isnewfile(self):
904 return any(self.newfile_re.match(h) for h in self.header)
904 return any(self.newfile_re.match(h) for h in self.header)
905
905
906 def special(self):
906 def special(self):
907 # Special files are shown only at the header level and not at the hunk
907 # Special files are shown only at the header level and not at the hunk
908 # level for example a file that has been deleted is a special file.
908 # level for example a file that has been deleted is a special file.
909 # The user cannot change the content of the operation, in the case of
909 # The user cannot change the content of the operation, in the case of
910 # the deleted file he has to take the deletion or not take it, he
910 # the deleted file he has to take the deletion or not take it, he
911 # cannot take some of it.
911 # cannot take some of it.
912 # Newly added files are special if they are empty, they are not special
912 # Newly added files are special if they are empty, they are not special
913 # if they have some content as we want to be able to change it
913 # if they have some content as we want to be able to change it
914 nocontent = len(self.header) == 2
914 nocontent = len(self.header) == 2
915 emptynewfile = self.isnewfile() and nocontent
915 emptynewfile = self.isnewfile() and nocontent
916 return emptynewfile or \
916 return emptynewfile or \
917 any(self.special_re.match(h) for h in self.header)
917 any(self.special_re.match(h) for h in self.header)
918
918
919 class recordhunk(object):
919 class recordhunk(object):
920 """patch hunk
920 """patch hunk
921
921
922 XXX shouldn't we merge this with the other hunk class?
922 XXX shouldn't we merge this with the other hunk class?
923 """
923 """
924 maxcontext = 3
924 maxcontext = 3
925
925
926 def __init__(self, header, fromline, toline, proc, before, hunk, after):
926 def __init__(self, header, fromline, toline, proc, before, hunk, after):
927 def trimcontext(number, lines):
927 def trimcontext(number, lines):
928 delta = len(lines) - self.maxcontext
928 delta = len(lines) - self.maxcontext
929 if False and delta > 0:
929 if False and delta > 0:
930 return number + delta, lines[:self.maxcontext]
930 return number + delta, lines[:self.maxcontext]
931 return number, lines
931 return number, lines
932
932
933 self.header = header
933 self.header = header
934 self.fromline, self.before = trimcontext(fromline, before)
934 self.fromline, self.before = trimcontext(fromline, before)
935 self.toline, self.after = trimcontext(toline, after)
935 self.toline, self.after = trimcontext(toline, after)
936 self.proc = proc
936 self.proc = proc
937 self.hunk = hunk
937 self.hunk = hunk
938 self.added, self.removed = self.countchanges(self.hunk)
938 self.added, self.removed = self.countchanges(self.hunk)
939
939
940 def __eq__(self, v):
940 def __eq__(self, v):
941 if not isinstance(v, recordhunk):
941 if not isinstance(v, recordhunk):
942 return False
942 return False
943
943
944 return ((v.hunk == self.hunk) and
944 return ((v.hunk == self.hunk) and
945 (v.proc == self.proc) and
945 (v.proc == self.proc) and
946 (self.fromline == v.fromline) and
946 (self.fromline == v.fromline) and
947 (self.header.files() == v.header.files()))
947 (self.header.files() == v.header.files()))
948
948
949 def __hash__(self):
949 def __hash__(self):
950 return hash((tuple(self.hunk),
950 return hash((tuple(self.hunk),
951 tuple(self.header.files()),
951 tuple(self.header.files()),
952 self.fromline,
952 self.fromline,
953 self.proc))
953 self.proc))
954
954
955 def countchanges(self, hunk):
955 def countchanges(self, hunk):
956 """hunk -> (n+,n-)"""
956 """hunk -> (n+,n-)"""
957 add = len([h for h in hunk if h[0] == '+'])
957 add = len([h for h in hunk if h[0] == '+'])
958 rem = len([h for h in hunk if h[0] == '-'])
958 rem = len([h for h in hunk if h[0] == '-'])
959 return add, rem
959 return add, rem
960
960
961 def write(self, fp):
961 def write(self, fp):
962 delta = len(self.before) + len(self.after)
962 delta = len(self.before) + len(self.after)
963 if self.after and self.after[-1] == '\\ No newline at end of file\n':
963 if self.after and self.after[-1] == '\\ No newline at end of file\n':
964 delta -= 1
964 delta -= 1
965 fromlen = delta + self.removed
965 fromlen = delta + self.removed
966 tolen = delta + self.added
966 tolen = delta + self.added
967 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
967 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
968 (self.fromline, fromlen, self.toline, tolen,
968 (self.fromline, fromlen, self.toline, tolen,
969 self.proc and (' ' + self.proc)))
969 self.proc and (' ' + self.proc)))
970 fp.write(''.join(self.before + self.hunk + self.after))
970 fp.write(''.join(self.before + self.hunk + self.after))
971
971
972 pretty = write
972 pretty = write
973
973
974 def filename(self):
974 def filename(self):
975 return self.header.filename()
975 return self.header.filename()
976
976
977 def __repr__(self):
977 def __repr__(self):
978 return '<hunk %r@%d>' % (self.filename(), self.fromline)
978 return '<hunk %r@%d>' % (self.filename(), self.fromline)
979
979
980 def filterpatch(ui, headers, operation=None):
980 def filterpatch(ui, headers, operation=None):
981 """Interactively filter patch chunks into applied-only chunks"""
981 """Interactively filter patch chunks into applied-only chunks"""
982 if operation is None:
982 if operation is None:
983 operation = 'record'
983 operation = 'record'
984 messages = {
984 messages = {
985 'multiple': {
985 'multiple': {
986 'discard': _("discard change %d/%d to '%s'?"),
986 'discard': _("discard change %d/%d to '%s'?"),
987 'record': _("record change %d/%d to '%s'?"),
987 'record': _("record change %d/%d to '%s'?"),
988 'revert': _("revert change %d/%d to '%s'?"),
988 'revert': _("revert change %d/%d to '%s'?"),
989 }[operation],
989 }[operation],
990 'single': {
990 'single': {
991 'discard': _("discard this change to '%s'?"),
991 'discard': _("discard this change to '%s'?"),
992 'record': _("record this change to '%s'?"),
992 'record': _("record this change to '%s'?"),
993 'revert': _("revert this change to '%s'?"),
993 'revert': _("revert this change to '%s'?"),
994 }[operation],
994 }[operation],
995 }
995 }
996
996
997 def prompt(skipfile, skipall, query, chunk):
997 def prompt(skipfile, skipall, query, chunk):
998 """prompt query, and process base inputs
998 """prompt query, and process base inputs
999
999
1000 - y/n for the rest of file
1000 - y/n for the rest of file
1001 - y/n for the rest
1001 - y/n for the rest
1002 - ? (help)
1002 - ? (help)
1003 - q (quit)
1003 - q (quit)
1004
1004
1005 Return True/False and possibly updated skipfile and skipall.
1005 Return True/False and possibly updated skipfile and skipall.
1006 """
1006 """
1007 newpatches = None
1007 newpatches = None
1008 if skipall is not None:
1008 if skipall is not None:
1009 return skipall, skipfile, skipall, newpatches
1009 return skipall, skipfile, skipall, newpatches
1010 if skipfile is not None:
1010 if skipfile is not None:
1011 return skipfile, skipfile, skipall, newpatches
1011 return skipfile, skipfile, skipall, newpatches
1012 while True:
1012 while True:
1013 resps = _('[Ynesfdaq?]'
1013 resps = _('[Ynesfdaq?]'
1014 '$$ &Yes, record this change'
1014 '$$ &Yes, record this change'
1015 '$$ &No, skip this change'
1015 '$$ &No, skip this change'
1016 '$$ &Edit this change manually'
1016 '$$ &Edit this change manually'
1017 '$$ &Skip remaining changes to this file'
1017 '$$ &Skip remaining changes to this file'
1018 '$$ Record remaining changes to this &file'
1018 '$$ Record remaining changes to this &file'
1019 '$$ &Done, skip remaining changes and files'
1019 '$$ &Done, skip remaining changes and files'
1020 '$$ Record &all changes to all remaining files'
1020 '$$ Record &all changes to all remaining files'
1021 '$$ &Quit, recording no changes'
1021 '$$ &Quit, recording no changes'
1022 '$$ &? (display help)')
1022 '$$ &? (display help)')
1023 r = ui.promptchoice("%s %s" % (query, resps))
1023 r = ui.promptchoice("%s %s" % (query, resps))
1024 ui.write("\n")
1024 ui.write("\n")
1025 if r == 8: # ?
1025 if r == 8: # ?
1026 for c, t in ui.extractchoices(resps)[1]:
1026 for c, t in ui.extractchoices(resps)[1]:
1027 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1027 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1028 continue
1028 continue
1029 elif r == 0: # yes
1029 elif r == 0: # yes
1030 ret = True
1030 ret = True
1031 elif r == 1: # no
1031 elif r == 1: # no
1032 ret = False
1032 ret = False
1033 elif r == 2: # Edit patch
1033 elif r == 2: # Edit patch
1034 if chunk is None:
1034 if chunk is None:
1035 ui.write(_('cannot edit patch for whole file'))
1035 ui.write(_('cannot edit patch for whole file'))
1036 ui.write("\n")
1036 ui.write("\n")
1037 continue
1037 continue
1038 if chunk.header.binary():
1038 if chunk.header.binary():
1039 ui.write(_('cannot edit patch for binary file'))
1039 ui.write(_('cannot edit patch for binary file'))
1040 ui.write("\n")
1040 ui.write("\n")
1041 continue
1041 continue
1042 # Patch comment based on the Git one (based on comment at end of
1042 # Patch comment based on the Git one (based on comment at end of
1043 # https://mercurial-scm.org/wiki/RecordExtension)
1043 # https://mercurial-scm.org/wiki/RecordExtension)
1044 phelp = '---' + _("""
1044 phelp = '---' + _("""
1045 To remove '-' lines, make them ' ' lines (context).
1045 To remove '-' lines, make them ' ' lines (context).
1046 To remove '+' lines, delete them.
1046 To remove '+' lines, delete them.
1047 Lines starting with # will be removed from the patch.
1047 Lines starting with # will be removed from the patch.
1048
1048
1049 If the patch applies cleanly, the edited hunk will immediately be
1049 If the patch applies cleanly, the edited hunk will immediately be
1050 added to the record list. If it does not apply cleanly, a rejects
1050 added to the record list. If it does not apply cleanly, a rejects
1051 file will be generated: you can use that when you try again. If
1051 file will be generated: you can use that when you try again. If
1052 all lines of the hunk are removed, then the edit is aborted and
1052 all lines of the hunk are removed, then the edit is aborted and
1053 the hunk is left unchanged.
1053 the hunk is left unchanged.
1054 """)
1054 """)
1055 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1055 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1056 suffix=".diff", text=True)
1056 suffix=".diff", text=True)
1057 ncpatchfp = None
1057 ncpatchfp = None
1058 try:
1058 try:
1059 # Write the initial patch
1059 # Write the initial patch
1060 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1060 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1061 chunk.header.write(f)
1061 chunk.header.write(f)
1062 chunk.write(f)
1062 chunk.write(f)
1063 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1063 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1064 f.close()
1064 f.close()
1065 # Start the editor and wait for it to complete
1065 # Start the editor and wait for it to complete
1066 editor = ui.geteditor()
1066 editor = ui.geteditor()
1067 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1067 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1068 environ={'HGUSER': ui.username()},
1068 environ={'HGUSER': ui.username()},
1069 blockedtag='filterpatch')
1069 blockedtag='filterpatch')
1070 if ret != 0:
1070 if ret != 0:
1071 ui.warn(_("editor exited with exit code %d\n") % ret)
1071 ui.warn(_("editor exited with exit code %d\n") % ret)
1072 continue
1072 continue
1073 # Remove comment lines
1073 # Remove comment lines
1074 patchfp = open(patchfn)
1074 patchfp = open(patchfn)
1075 ncpatchfp = stringio()
1075 ncpatchfp = stringio()
1076 for line in util.iterfile(patchfp):
1076 for line in util.iterfile(patchfp):
1077 if not line.startswith('#'):
1077 if not line.startswith('#'):
1078 ncpatchfp.write(line)
1078 ncpatchfp.write(line)
1079 patchfp.close()
1079 patchfp.close()
1080 ncpatchfp.seek(0)
1080 ncpatchfp.seek(0)
1081 newpatches = parsepatch(ncpatchfp)
1081 newpatches = parsepatch(ncpatchfp)
1082 finally:
1082 finally:
1083 os.unlink(patchfn)
1083 os.unlink(patchfn)
1084 del ncpatchfp
1084 del ncpatchfp
1085 # Signal that the chunk shouldn't be applied as-is, but
1085 # Signal that the chunk shouldn't be applied as-is, but
1086 # provide the new patch to be used instead.
1086 # provide the new patch to be used instead.
1087 ret = False
1087 ret = False
1088 elif r == 3: # Skip
1088 elif r == 3: # Skip
1089 ret = skipfile = False
1089 ret = skipfile = False
1090 elif r == 4: # file (Record remaining)
1090 elif r == 4: # file (Record remaining)
1091 ret = skipfile = True
1091 ret = skipfile = True
1092 elif r == 5: # done, skip remaining
1092 elif r == 5: # done, skip remaining
1093 ret = skipall = False
1093 ret = skipall = False
1094 elif r == 6: # all
1094 elif r == 6: # all
1095 ret = skipall = True
1095 ret = skipall = True
1096 elif r == 7: # quit
1096 elif r == 7: # quit
1097 raise error.Abort(_('user quit'))
1097 raise error.Abort(_('user quit'))
1098 return ret, skipfile, skipall, newpatches
1098 return ret, skipfile, skipall, newpatches
1099
1099
1100 seen = set()
1100 seen = set()
1101 applied = {} # 'filename' -> [] of chunks
1101 applied = {} # 'filename' -> [] of chunks
1102 skipfile, skipall = None, None
1102 skipfile, skipall = None, None
1103 pos, total = 1, sum(len(h.hunks) for h in headers)
1103 pos, total = 1, sum(len(h.hunks) for h in headers)
1104 for h in headers:
1104 for h in headers:
1105 pos += len(h.hunks)
1105 pos += len(h.hunks)
1106 skipfile = None
1106 skipfile = None
1107 fixoffset = 0
1107 fixoffset = 0
1108 hdr = ''.join(h.header)
1108 hdr = ''.join(h.header)
1109 if hdr in seen:
1109 if hdr in seen:
1110 continue
1110 continue
1111 seen.add(hdr)
1111 seen.add(hdr)
1112 if skipall is None:
1112 if skipall is None:
1113 h.pretty(ui)
1113 h.pretty(ui)
1114 msg = (_('examine changes to %s?') %
1114 msg = (_('examine changes to %s?') %
1115 _(' and ').join("'%s'" % f for f in h.files()))
1115 _(' and ').join("'%s'" % f for f in h.files()))
1116 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1116 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1117 if not r:
1117 if not r:
1118 continue
1118 continue
1119 applied[h.filename()] = [h]
1119 applied[h.filename()] = [h]
1120 if h.allhunks():
1120 if h.allhunks():
1121 applied[h.filename()] += h.hunks
1121 applied[h.filename()] += h.hunks
1122 continue
1122 continue
1123 for i, chunk in enumerate(h.hunks):
1123 for i, chunk in enumerate(h.hunks):
1124 if skipfile is None and skipall is None:
1124 if skipfile is None and skipall is None:
1125 chunk.pretty(ui)
1125 chunk.pretty(ui)
1126 if total == 1:
1126 if total == 1:
1127 msg = messages['single'] % chunk.filename()
1127 msg = messages['single'] % chunk.filename()
1128 else:
1128 else:
1129 idx = pos - len(h.hunks) + i
1129 idx = pos - len(h.hunks) + i
1130 msg = messages['multiple'] % (idx, total, chunk.filename())
1130 msg = messages['multiple'] % (idx, total, chunk.filename())
1131 r, skipfile, skipall, newpatches = prompt(skipfile,
1131 r, skipfile, skipall, newpatches = prompt(skipfile,
1132 skipall, msg, chunk)
1132 skipall, msg, chunk)
1133 if r:
1133 if r:
1134 if fixoffset:
1134 if fixoffset:
1135 chunk = copy.copy(chunk)
1135 chunk = copy.copy(chunk)
1136 chunk.toline += fixoffset
1136 chunk.toline += fixoffset
1137 applied[chunk.filename()].append(chunk)
1137 applied[chunk.filename()].append(chunk)
1138 elif newpatches is not None:
1138 elif newpatches is not None:
1139 for newpatch in newpatches:
1139 for newpatch in newpatches:
1140 for newhunk in newpatch.hunks:
1140 for newhunk in newpatch.hunks:
1141 if fixoffset:
1141 if fixoffset:
1142 newhunk.toline += fixoffset
1142 newhunk.toline += fixoffset
1143 applied[newhunk.filename()].append(newhunk)
1143 applied[newhunk.filename()].append(newhunk)
1144 else:
1144 else:
1145 fixoffset += chunk.removed - chunk.added
1145 fixoffset += chunk.removed - chunk.added
1146 return (sum([h for h in applied.itervalues()
1146 return (sum([h for h in applied.itervalues()
1147 if h[0].special() or len(h) > 1], []), {})
1147 if h[0].special() or len(h) > 1], []), {})
1148 class hunk(object):
1148 class hunk(object):
1149 def __init__(self, desc, num, lr, context):
1149 def __init__(self, desc, num, lr, context):
1150 self.number = num
1150 self.number = num
1151 self.desc = desc
1151 self.desc = desc
1152 self.hunk = [desc]
1152 self.hunk = [desc]
1153 self.a = []
1153 self.a = []
1154 self.b = []
1154 self.b = []
1155 self.starta = self.lena = None
1155 self.starta = self.lena = None
1156 self.startb = self.lenb = None
1156 self.startb = self.lenb = None
1157 if lr is not None:
1157 if lr is not None:
1158 if context:
1158 if context:
1159 self.read_context_hunk(lr)
1159 self.read_context_hunk(lr)
1160 else:
1160 else:
1161 self.read_unified_hunk(lr)
1161 self.read_unified_hunk(lr)
1162
1162
1163 def getnormalized(self):
1163 def getnormalized(self):
1164 """Return a copy with line endings normalized to LF."""
1164 """Return a copy with line endings normalized to LF."""
1165
1165
1166 def normalize(lines):
1166 def normalize(lines):
1167 nlines = []
1167 nlines = []
1168 for line in lines:
1168 for line in lines:
1169 if line.endswith('\r\n'):
1169 if line.endswith('\r\n'):
1170 line = line[:-2] + '\n'
1170 line = line[:-2] + '\n'
1171 nlines.append(line)
1171 nlines.append(line)
1172 return nlines
1172 return nlines
1173
1173
1174 # Dummy object, it is rebuilt manually
1174 # Dummy object, it is rebuilt manually
1175 nh = hunk(self.desc, self.number, None, None)
1175 nh = hunk(self.desc, self.number, None, None)
1176 nh.number = self.number
1176 nh.number = self.number
1177 nh.desc = self.desc
1177 nh.desc = self.desc
1178 nh.hunk = self.hunk
1178 nh.hunk = self.hunk
1179 nh.a = normalize(self.a)
1179 nh.a = normalize(self.a)
1180 nh.b = normalize(self.b)
1180 nh.b = normalize(self.b)
1181 nh.starta = self.starta
1181 nh.starta = self.starta
1182 nh.startb = self.startb
1182 nh.startb = self.startb
1183 nh.lena = self.lena
1183 nh.lena = self.lena
1184 nh.lenb = self.lenb
1184 nh.lenb = self.lenb
1185 return nh
1185 return nh
1186
1186
1187 def read_unified_hunk(self, lr):
1187 def read_unified_hunk(self, lr):
1188 m = unidesc.match(self.desc)
1188 m = unidesc.match(self.desc)
1189 if not m:
1189 if not m:
1190 raise PatchError(_("bad hunk #%d") % self.number)
1190 raise PatchError(_("bad hunk #%d") % self.number)
1191 self.starta, self.lena, self.startb, self.lenb = m.groups()
1191 self.starta, self.lena, self.startb, self.lenb = m.groups()
1192 if self.lena is None:
1192 if self.lena is None:
1193 self.lena = 1
1193 self.lena = 1
1194 else:
1194 else:
1195 self.lena = int(self.lena)
1195 self.lena = int(self.lena)
1196 if self.lenb is None:
1196 if self.lenb is None:
1197 self.lenb = 1
1197 self.lenb = 1
1198 else:
1198 else:
1199 self.lenb = int(self.lenb)
1199 self.lenb = int(self.lenb)
1200 self.starta = int(self.starta)
1200 self.starta = int(self.starta)
1201 self.startb = int(self.startb)
1201 self.startb = int(self.startb)
1202 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1202 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1203 self.b)
1203 self.b)
1204 # if we hit eof before finishing out the hunk, the last line will
1204 # if we hit eof before finishing out the hunk, the last line will
1205 # be zero length. Lets try to fix it up.
1205 # be zero length. Lets try to fix it up.
1206 while len(self.hunk[-1]) == 0:
1206 while len(self.hunk[-1]) == 0:
1207 del self.hunk[-1]
1207 del self.hunk[-1]
1208 del self.a[-1]
1208 del self.a[-1]
1209 del self.b[-1]
1209 del self.b[-1]
1210 self.lena -= 1
1210 self.lena -= 1
1211 self.lenb -= 1
1211 self.lenb -= 1
1212 self._fixnewline(lr)
1212 self._fixnewline(lr)
1213
1213
1214 def read_context_hunk(self, lr):
1214 def read_context_hunk(self, lr):
1215 self.desc = lr.readline()
1215 self.desc = lr.readline()
1216 m = contextdesc.match(self.desc)
1216 m = contextdesc.match(self.desc)
1217 if not m:
1217 if not m:
1218 raise PatchError(_("bad hunk #%d") % self.number)
1218 raise PatchError(_("bad hunk #%d") % self.number)
1219 self.starta, aend = m.groups()
1219 self.starta, aend = m.groups()
1220 self.starta = int(self.starta)
1220 self.starta = int(self.starta)
1221 if aend is None:
1221 if aend is None:
1222 aend = self.starta
1222 aend = self.starta
1223 self.lena = int(aend) - self.starta
1223 self.lena = int(aend) - self.starta
1224 if self.starta:
1224 if self.starta:
1225 self.lena += 1
1225 self.lena += 1
1226 for x in xrange(self.lena):
1226 for x in xrange(self.lena):
1227 l = lr.readline()
1227 l = lr.readline()
1228 if l.startswith('---'):
1228 if l.startswith('---'):
1229 # lines addition, old block is empty
1229 # lines addition, old block is empty
1230 lr.push(l)
1230 lr.push(l)
1231 break
1231 break
1232 s = l[2:]
1232 s = l[2:]
1233 if l.startswith('- ') or l.startswith('! '):
1233 if l.startswith('- ') or l.startswith('! '):
1234 u = '-' + s
1234 u = '-' + s
1235 elif l.startswith(' '):
1235 elif l.startswith(' '):
1236 u = ' ' + s
1236 u = ' ' + s
1237 else:
1237 else:
1238 raise PatchError(_("bad hunk #%d old text line %d") %
1238 raise PatchError(_("bad hunk #%d old text line %d") %
1239 (self.number, x))
1239 (self.number, x))
1240 self.a.append(u)
1240 self.a.append(u)
1241 self.hunk.append(u)
1241 self.hunk.append(u)
1242
1242
1243 l = lr.readline()
1243 l = lr.readline()
1244 if l.startswith('\ '):
1244 if l.startswith('\ '):
1245 s = self.a[-1][:-1]
1245 s = self.a[-1][:-1]
1246 self.a[-1] = s
1246 self.a[-1] = s
1247 self.hunk[-1] = s
1247 self.hunk[-1] = s
1248 l = lr.readline()
1248 l = lr.readline()
1249 m = contextdesc.match(l)
1249 m = contextdesc.match(l)
1250 if not m:
1250 if not m:
1251 raise PatchError(_("bad hunk #%d") % self.number)
1251 raise PatchError(_("bad hunk #%d") % self.number)
1252 self.startb, bend = m.groups()
1252 self.startb, bend = m.groups()
1253 self.startb = int(self.startb)
1253 self.startb = int(self.startb)
1254 if bend is None:
1254 if bend is None:
1255 bend = self.startb
1255 bend = self.startb
1256 self.lenb = int(bend) - self.startb
1256 self.lenb = int(bend) - self.startb
1257 if self.startb:
1257 if self.startb:
1258 self.lenb += 1
1258 self.lenb += 1
1259 hunki = 1
1259 hunki = 1
1260 for x in xrange(self.lenb):
1260 for x in xrange(self.lenb):
1261 l = lr.readline()
1261 l = lr.readline()
1262 if l.startswith('\ '):
1262 if l.startswith('\ '):
1263 # XXX: the only way to hit this is with an invalid line range.
1263 # XXX: the only way to hit this is with an invalid line range.
1264 # The no-eol marker is not counted in the line range, but I
1264 # The no-eol marker is not counted in the line range, but I
1265 # guess there are diff(1) out there which behave differently.
1265 # guess there are diff(1) out there which behave differently.
1266 s = self.b[-1][:-1]
1266 s = self.b[-1][:-1]
1267 self.b[-1] = s
1267 self.b[-1] = s
1268 self.hunk[hunki - 1] = s
1268 self.hunk[hunki - 1] = s
1269 continue
1269 continue
1270 if not l:
1270 if not l:
1271 # line deletions, new block is empty and we hit EOF
1271 # line deletions, new block is empty and we hit EOF
1272 lr.push(l)
1272 lr.push(l)
1273 break
1273 break
1274 s = l[2:]
1274 s = l[2:]
1275 if l.startswith('+ ') or l.startswith('! '):
1275 if l.startswith('+ ') or l.startswith('! '):
1276 u = '+' + s
1276 u = '+' + s
1277 elif l.startswith(' '):
1277 elif l.startswith(' '):
1278 u = ' ' + s
1278 u = ' ' + s
1279 elif len(self.b) == 0:
1279 elif len(self.b) == 0:
1280 # line deletions, new block is empty
1280 # line deletions, new block is empty
1281 lr.push(l)
1281 lr.push(l)
1282 break
1282 break
1283 else:
1283 else:
1284 raise PatchError(_("bad hunk #%d old text line %d") %
1284 raise PatchError(_("bad hunk #%d old text line %d") %
1285 (self.number, x))
1285 (self.number, x))
1286 self.b.append(s)
1286 self.b.append(s)
1287 while True:
1287 while True:
1288 if hunki >= len(self.hunk):
1288 if hunki >= len(self.hunk):
1289 h = ""
1289 h = ""
1290 else:
1290 else:
1291 h = self.hunk[hunki]
1291 h = self.hunk[hunki]
1292 hunki += 1
1292 hunki += 1
1293 if h == u:
1293 if h == u:
1294 break
1294 break
1295 elif h.startswith('-'):
1295 elif h.startswith('-'):
1296 continue
1296 continue
1297 else:
1297 else:
1298 self.hunk.insert(hunki - 1, u)
1298 self.hunk.insert(hunki - 1, u)
1299 break
1299 break
1300
1300
1301 if not self.a:
1301 if not self.a:
1302 # this happens when lines were only added to the hunk
1302 # this happens when lines were only added to the hunk
1303 for x in self.hunk:
1303 for x in self.hunk:
1304 if x.startswith('-') or x.startswith(' '):
1304 if x.startswith('-') or x.startswith(' '):
1305 self.a.append(x)
1305 self.a.append(x)
1306 if not self.b:
1306 if not self.b:
1307 # this happens when lines were only deleted from the hunk
1307 # this happens when lines were only deleted from the hunk
1308 for x in self.hunk:
1308 for x in self.hunk:
1309 if x.startswith('+') or x.startswith(' '):
1309 if x.startswith('+') or x.startswith(' '):
1310 self.b.append(x[1:])
1310 self.b.append(x[1:])
1311 # @@ -start,len +start,len @@
1311 # @@ -start,len +start,len @@
1312 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1312 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1313 self.startb, self.lenb)
1313 self.startb, self.lenb)
1314 self.hunk[0] = self.desc
1314 self.hunk[0] = self.desc
1315 self._fixnewline(lr)
1315 self._fixnewline(lr)
1316
1316
1317 def _fixnewline(self, lr):
1317 def _fixnewline(self, lr):
1318 l = lr.readline()
1318 l = lr.readline()
1319 if l.startswith('\ '):
1319 if l.startswith('\ '):
1320 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1320 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1321 else:
1321 else:
1322 lr.push(l)
1322 lr.push(l)
1323
1323
1324 def complete(self):
1324 def complete(self):
1325 return len(self.a) == self.lena and len(self.b) == self.lenb
1325 return len(self.a) == self.lena and len(self.b) == self.lenb
1326
1326
1327 def _fuzzit(self, old, new, fuzz, toponly):
1327 def _fuzzit(self, old, new, fuzz, toponly):
1328 # this removes context lines from the top and bottom of list 'l'. It
1328 # this removes context lines from the top and bottom of list 'l'. It
1329 # checks the hunk to make sure only context lines are removed, and then
1329 # checks the hunk to make sure only context lines are removed, and then
1330 # returns a new shortened list of lines.
1330 # returns a new shortened list of lines.
1331 fuzz = min(fuzz, len(old))
1331 fuzz = min(fuzz, len(old))
1332 if fuzz:
1332 if fuzz:
1333 top = 0
1333 top = 0
1334 bot = 0
1334 bot = 0
1335 hlen = len(self.hunk)
1335 hlen = len(self.hunk)
1336 for x in xrange(hlen - 1):
1336 for x in xrange(hlen - 1):
1337 # the hunk starts with the @@ line, so use x+1
1337 # the hunk starts with the @@ line, so use x+1
1338 if self.hunk[x + 1][0] == ' ':
1338 if self.hunk[x + 1][0] == ' ':
1339 top += 1
1339 top += 1
1340 else:
1340 else:
1341 break
1341 break
1342 if not toponly:
1342 if not toponly:
1343 for x in xrange(hlen - 1):
1343 for x in xrange(hlen - 1):
1344 if self.hunk[hlen - bot - 1][0] == ' ':
1344 if self.hunk[hlen - bot - 1][0] == ' ':
1345 bot += 1
1345 bot += 1
1346 else:
1346 else:
1347 break
1347 break
1348
1348
1349 bot = min(fuzz, bot)
1349 bot = min(fuzz, bot)
1350 top = min(fuzz, top)
1350 top = min(fuzz, top)
1351 return old[top:len(old) - bot], new[top:len(new) - bot], top
1351 return old[top:len(old) - bot], new[top:len(new) - bot], top
1352 return old, new, 0
1352 return old, new, 0
1353
1353
1354 def fuzzit(self, fuzz, toponly):
1354 def fuzzit(self, fuzz, toponly):
1355 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1355 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1356 oldstart = self.starta + top
1356 oldstart = self.starta + top
1357 newstart = self.startb + top
1357 newstart = self.startb + top
1358 # zero length hunk ranges already have their start decremented
1358 # zero length hunk ranges already have their start decremented
1359 if self.lena and oldstart > 0:
1359 if self.lena and oldstart > 0:
1360 oldstart -= 1
1360 oldstart -= 1
1361 if self.lenb and newstart > 0:
1361 if self.lenb and newstart > 0:
1362 newstart -= 1
1362 newstart -= 1
1363 return old, oldstart, new, newstart
1363 return old, oldstart, new, newstart
1364
1364
class binhunk(object):
    'A binary patch file.'

    def __init__(self, lr, fname):
        # decoded payload; stays None until _read succeeds
        self.text = None
        # True when the payload is a delta against the old contents
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """Return True once the binary payload has been decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file contents as a one-element list.

        For a delta hunk, ``lines`` is the old contents to apply the
        delta against; otherwise the literal payload is returned as-is.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a git binary patch ("literal N" or "delta N" followed by
        base85-encoded, zlib-compressed data) from linereader ``lr``.

        Raises PatchError on truncated, undecodable, or size-mismatched
        payloads.
        """
        def getline(lr, hunk):
            # record the raw line in the hunk, return it without EOL
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of this line:
            # 'A'-'Z' -> 1-26, 'a'-'z' -> 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1420
1420
def parsefilename(str):
    """Extract the filename from a "--- file\\t..." / "+++ file ..." line.

    Drops the 4-character prefix, strips the EOL, and cuts the result
    at the first tab (or, failing that, the first space) separating the
    name from trailing metadata such as timestamps.  NOTE: the parameter
    shadows the ``str`` builtin; kept for interface compatibility.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]
1430
1430
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # swap the +/- markers and the added/removed counters;
            # headers pass through unchanged
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
1502
1502
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the start lines from an "@@ -a,b +c,d @@" event
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context after a hunk closes the pending hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen before the hunk becomes its leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; any missing entry is a parse error
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1585
1585
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
1623
1623
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch hunk.

    Decides which file the hunk targets (afile vs bfile) using which of
    them exists in ``backend``, and whether the hunk creates or deletes
    the file.  Raises PatchError when neither source nor destination can
    be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a 0,0 range against /dev/null marks file creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1678
1678
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # unchanged context line; '\' covers no-eol markers
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1724
1724
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it all so we can rewind
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from the start
    fp.seek(pos)
    return gitpatches
1750
1750
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # pending gitpatch records (reversed so pop() walks them in order),
    # populated once a 'diff --git' header is seen
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = diff style unknown yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk: unified '@@', context '***...***',
            # or a git binary patch body
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file before the hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) that precede this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # reset per-file state; the 'file' event itself is emitted
            # lazily when the first hunk arrives (see emitfile above)
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git patches
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1846
1846
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    ``binchunk`` is the delta (two size varints followed by copy/insert
    commands) and ``data`` is the source the delta is applied against.
    Returns the reconstructed content as bytes.
    """
    def deltahead(binchunk):
        # number of bytes occupied by the little-endian base-128 varint
        # at the start of binchunk (a byte with the high bit clear ends it)
        i = 0
        while i < len(binchunk):
            i += 1
            # slice instead of indexing: indexing bytes on Python 3
            # yields an int, which ord() rejects
            if not (ord(binchunk[i - 1:i]) & 0x80):
                return i
        return i
    out = b''
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip the source-size varint
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip the result-size varint
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy command: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert command: cmd is the count of literal bytes that follow
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1902
1902
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and try to apply it via ``patchfile``.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With 'strict' eolmode the patch content and patched file are read
    in binary mode; with any other mode, line endings are ignored while
    patching and then normalized according to 'eolmode'.
    """
    opts = {'strip': strip, 'prefix': prefix, 'eolmode': eolmode}
    return _applydiff(ui, fp, patchfile, backend, store, **opts)
1915
1915
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Core patch-application loop shared by applydiff(): consumes the
    # event stream from iterhunks(fp) and drives 'patcher' (the patchfile
    # factory) against 'backend', staging copy/rename sources in 'store'.
    # Returns 0 on success, 1 if any hunk applied with fuzz, -1 on rejects.

    if prefix:
        # normalize the target prefix relative to the repo root
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip patch path components and apply the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # hunk applied, but with fuzz
                err = 1
        elif state == 'file':
            if current_file:
                # finalize the previous file before switching
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize metadata from the file headers
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no content hunks)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # count the failure and keep going with the next file
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stage copy/rename sources before any of them is modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2005
2005
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    # per-file reporting state, initialized up front: if the patcher
    # emits a 'with fuzz'/'FAILED' line before any 'patching file' line,
    # referencing these without initialization would raise
    # UnboundLocalError
    pf = ''
    printed_file = False
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2047
2047
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply ``patchobj`` (a path or a file-like object) via ``backend``.

    ``files`` collects the names of touched files. Returns True when the
    patch applied with fuzz, False when it applied cleanly; raises
    PatchError when any hunk was rejected.
    """
    files = set() if files is None else files
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume patchobj already behaves like a file
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2074
2074
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with any fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2081
2081
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply ``patchobj`` on top of ``ctx`` through a repobackend,
    recording results in ``store``."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2086
2086
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # prefer a user-configured external patch program when one is set
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2108
2108
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at ``patchpath``.

    Rename sources are included alongside their destinations.
    """
    backend = fsbackend(ui, repo.root)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
            else:
                # non-git patch: derive metadata from the file headers
                gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                   strip, '')
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
    return changed
2129
2129
class GitDiffRequired(Exception):
    """Exception signaling that the git diff format is required."""
2132
2132
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Return diffopts with every optional feature enabled and parsed."""
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)
2137
2137
# backwards-compatible alias for diffallopts
diffopts = diffallopts
2139
2139
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve an option: command-line value first, then (unless
        # HGPLAIN forces a default) the config file entry
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a value (possibly None) for 'index'
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2217
2217
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    hunksiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                          changes=changes, opts=opts, losedatafn=losedatafn,
                          prefix=prefix, relroot=relroot, copy=copy)
    for header, hunks in hunksiter:
        # flatten all hunk lines for this file into one text chunk
        lines = []
        for hunkrange, hunklines in hunks:
            lines.extend(hunklines)
        text = ''.join(lines)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2251
2251
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default: diff the working directory against its first parent
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # Build a filectx getter backed by a small LRU cache (20 entries) of
        # filelogs, so repeated lookups of the same file reuse its revlog.
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # f was used again: move it to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    # status tuple: (modified, added, removed, ...); only the first three
    # are needed here
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        # nothing changed, nothing to diff
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
    if opts.git or opts.upgrade:
        # copy/rename information is only representable in git-style diffs
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # NOTE(review): entries are deleted during iteration, so this relies on
    # Python 2's dict.items() returning a list snapshot.
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # abort the plain-diff attempt unless the caller explicitly
                # accepts losing data for this file
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # retry as a git-style diff, which can represent everything
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2364
2364
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        for lineno, line in enumerate(chunk.split('\n')):
            if lineno != 0:
                # re-emit the newline split() removed
                yield ('\n', '')
            # track whether we are inside a file header: a header opens on
            # any line not starting with a diff-body character and closes at
            # the first hunk ('@') line
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            stripline = line
            diffline = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if not stripline.startswith(prefix):
                    continue
                if diffline:
                    # split on tabs so they can be labeled separately
                    for token in tabsplitter.findall(stripline):
                        if token[0] == '\t':
                            yield (token, 'diff.tab')
                        else:
                            yield (token, label)
                else:
                    yield (stripline, label)
                break
            else:
                # no known prefix: emit unlabeled
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2418
2418
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel, which attaches color labels to diff() output
    return difflabel(diff, *args, **kw)
2422
2422
2423 def _filepairs(modified, added, removed, copy, opts):
2423 def _filepairs(modified, added, removed, copy, opts):
2424 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2424 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2425 before and f2 is the the name after. For added files, f1 will be None,
2425 before and f2 is the the name after. For added files, f1 will be None,
2426 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2426 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2427 or 'rename' (the latter two only if opts.git is set).'''
2427 or 'rename' (the latter two only if opts.git is set).'''
2428 gone = set()
2428 gone = set()
2429
2429
2430 copyto = dict([(v, k) for k, v in copy.items()])
2430 copyto = dict([(v, k) for k, v in copy.items()])
2431
2431
2432 addedset, removedset = set(added), set(removed)
2432 addedset, removedset = set(added), set(removed)
2433
2433
2434 for f in sorted(modified + added + removed):
2434 for f in sorted(modified + added + removed):
2435 copyop = None
2435 copyop = None
2436 f1, f2 = f, f
2436 f1, f2 = f, f
2437 if f in addedset:
2437 if f in addedset:
2438 f1 = None
2438 f1 = None
2439 if f in copy:
2439 if f in copy:
2440 if opts.git:
2440 if opts.git:
2441 f1 = copy[f]
2441 f1 = copy[f]
2442 if f1 in removedset and f1 not in gone:
2442 if f1 in removedset and f1 not in gone:
2443 copyop = 'rename'
2443 copyop = 'rename'
2444 gone.add(f1)
2444 gone.add(f1)
2445 else:
2445 else:
2446 copyop = 'copy'
2446 copyop = 'copy'
2447 elif f in removedset:
2447 elif f in removedset:
2448 f2 = None
2448 f2 = None
2449 if opts.git:
2449 if opts.git:
2450 # have we already reported a copy above?
2450 # have we already reported a copy above?
2451 if (f in copyto and copyto[f] in addedset
2451 if (f in copyto and copyto[f] in addedset
2452 and copy[copyto[f]] == f):
2452 and copy[copyto[f]] == f):
2453 continue
2453 continue
2454 yield f1, f2, copyop
2454 yield f1, f2, copyop
2455
2455
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0" followed by the content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # traditional "diff -r REV1 -r REV2 file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # flag character -> git file mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: every involved path must live under relroot
        # NOTE(review): copy.keys() + copy.values() relies on Python 2's
        # dict methods returning lists.
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            # a plain (non-git) diff cannot represent any of these cases;
            # give the caller a chance to veto
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        # strip relroot and prepend the output prefix to the displayed paths
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            # binary files are emitted as a base85 delta, not text hunks
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                # blob ids truncated to the configured index length
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2576
2576
def diffstatsum(stats):
    '''reduce per-file (filename, adds, removes, isbinary) tuples to totals

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename, the largest per-file change count, the grand add/remove
    totals, and whether any entry was binary.'''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2587
2587
def diffstatdata(lines):
    '''parse an iterable of diff lines into per-file statistics

    Returns a list of (filename, adds, removes, isbinary) tuples, one per
    file seen in the diff.  Both git-style ('diff --git a/...') and
    traditional ('diff -r ... -r ... filename') headers are recognized.'''
    # raw string literal: '\s' in a plain string is an invalid escape
    # sequence on Python 3
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the final file
    addresult()
    return results
2617
2617
def diffstat(lines, width=80):
    '''render a diffstat summary of an iterable of diff lines

    One row per file (name, change count, +/- graph), followed by a totals
    line; the graph is scaled so rows fit within *width* columns.'''
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    # columns left for the +/- graph, but never fewer than 10
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding,
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2655
2655
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # summary line or a row without a graph: emit unlabeled
            yield (line, '')
        else:
            # split the "name | count" part from the trailing +/- graph
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now