##// END OF EJS Templates
mdiff: let unidiff return (diffheader, hunks)...
Denis Laxalde -
r31273:92714858 default
parent child Browse files
Show More
@@ -1,443 +1,457 b''
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 base85,
16 base85,
17 bdiff,
17 bdiff,
18 error,
18 error,
19 mpatch,
19 mpatch,
20 util,
20 util,
21 )
21 )
22
22
23 def splitnewlines(text):
23 def splitnewlines(text):
24 '''like str.splitlines, but only split on newlines.'''
24 '''like str.splitlines, but only split on newlines.'''
25 lines = [l + '\n' for l in text.split('\n')]
25 lines = [l + '\n' for l in text.split('\n')]
26 if lines:
26 if lines:
27 if lines[-1] == '\n':
27 if lines[-1] == '\n':
28 lines.pop()
28 lines.pop()
29 else:
29 else:
30 lines[-1] = lines[-1][:-1]
30 lines[-1] = lines[-1][:-1]
31 return lines
31 return lines
32
32
33 class diffopts(object):
33 class diffopts(object):
34 '''context is the number of context lines
34 '''context is the number of context lines
35 text treats all files as text
35 text treats all files as text
36 showfunc enables diff -p output
36 showfunc enables diff -p output
37 git enables the git extended patch format
37 git enables the git extended patch format
38 nodates removes dates from diff headers
38 nodates removes dates from diff headers
39 nobinary ignores binary files
39 nobinary ignores binary files
40 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
40 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
41 ignorews ignores all whitespace changes in the diff
41 ignorews ignores all whitespace changes in the diff
42 ignorewsamount ignores changes in the amount of whitespace
42 ignorewsamount ignores changes in the amount of whitespace
43 ignoreblanklines ignores changes whose lines are all blank
43 ignoreblanklines ignores changes whose lines are all blank
44 upgrade generates git diffs to avoid data loss
44 upgrade generates git diffs to avoid data loss
45 '''
45 '''
46
46
47 defaults = {
47 defaults = {
48 'context': 3,
48 'context': 3,
49 'text': False,
49 'text': False,
50 'showfunc': False,
50 'showfunc': False,
51 'git': False,
51 'git': False,
52 'nodates': False,
52 'nodates': False,
53 'nobinary': False,
53 'nobinary': False,
54 'noprefix': False,
54 'noprefix': False,
55 'index': 0,
55 'index': 0,
56 'ignorews': False,
56 'ignorews': False,
57 'ignorewsamount': False,
57 'ignorewsamount': False,
58 'ignoreblanklines': False,
58 'ignoreblanklines': False,
59 'upgrade': False,
59 'upgrade': False,
60 'showsimilarity': False,
60 'showsimilarity': False,
61 }
61 }
62
62
63 def __init__(self, **opts):
63 def __init__(self, **opts):
64 for k in self.defaults.keys():
64 for k in self.defaults.keys():
65 v = opts.get(k)
65 v = opts.get(k)
66 if v is None:
66 if v is None:
67 v = self.defaults[k]
67 v = self.defaults[k]
68 setattr(self, k, v)
68 setattr(self, k, v)
69
69
70 try:
70 try:
71 self.context = int(self.context)
71 self.context = int(self.context)
72 except ValueError:
72 except ValueError:
73 raise error.Abort(_('diff context lines count must be '
73 raise error.Abort(_('diff context lines count must be '
74 'an integer, not %r') % self.context)
74 'an integer, not %r') % self.context)
75
75
76 def copy(self, **kwargs):
76 def copy(self, **kwargs):
77 opts = dict((k, getattr(self, k)) for k in self.defaults)
77 opts = dict((k, getattr(self, k)) for k in self.defaults)
78 opts.update(kwargs)
78 opts.update(kwargs)
79 return diffopts(**opts)
79 return diffopts(**opts)
80
80
81 defaultopts = diffopts()
81 defaultopts = diffopts()
82
82
83 def wsclean(opts, text, blank=True):
83 def wsclean(opts, text, blank=True):
84 if opts.ignorews:
84 if opts.ignorews:
85 text = bdiff.fixws(text, 1)
85 text = bdiff.fixws(text, 1)
86 elif opts.ignorewsamount:
86 elif opts.ignorewsamount:
87 text = bdiff.fixws(text, 0)
87 text = bdiff.fixws(text, 0)
88 if blank and opts.ignoreblanklines:
88 if blank and opts.ignoreblanklines:
89 text = re.sub('\n+', '\n', text).strip('\n')
89 text = re.sub('\n+', '\n', text).strip('\n')
90 return text
90 return text
91
91
92 def splitblock(base1, lines1, base2, lines2, opts):
92 def splitblock(base1, lines1, base2, lines2, opts):
93 # The input lines matches except for interwoven blank lines. We
93 # The input lines matches except for interwoven blank lines. We
94 # transform it into a sequence of matching blocks and blank blocks.
94 # transform it into a sequence of matching blocks and blank blocks.
95 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
95 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
96 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
96 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
97 s1, e1 = 0, len(lines1)
97 s1, e1 = 0, len(lines1)
98 s2, e2 = 0, len(lines2)
98 s2, e2 = 0, len(lines2)
99 while s1 < e1 or s2 < e2:
99 while s1 < e1 or s2 < e2:
100 i1, i2, btype = s1, s2, '='
100 i1, i2, btype = s1, s2, '='
101 if (i1 >= e1 or lines1[i1] == 0
101 if (i1 >= e1 or lines1[i1] == 0
102 or i2 >= e2 or lines2[i2] == 0):
102 or i2 >= e2 or lines2[i2] == 0):
103 # Consume the block of blank lines
103 # Consume the block of blank lines
104 btype = '~'
104 btype = '~'
105 while i1 < e1 and lines1[i1] == 0:
105 while i1 < e1 and lines1[i1] == 0:
106 i1 += 1
106 i1 += 1
107 while i2 < e2 and lines2[i2] == 0:
107 while i2 < e2 and lines2[i2] == 0:
108 i2 += 1
108 i2 += 1
109 else:
109 else:
110 # Consume the matching lines
110 # Consume the matching lines
111 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
111 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
112 i1 += 1
112 i1 += 1
113 i2 += 1
113 i2 += 1
114 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
114 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
115 s1 = i1
115 s1 = i1
116 s2 = i2
116 s2 = i2
117
117
118 def blocksinrange(blocks, rangeb):
118 def blocksinrange(blocks, rangeb):
119 """filter `blocks` like (a1, a2, b1, b2) from items outside line range
119 """filter `blocks` like (a1, a2, b1, b2) from items outside line range
120 `rangeb` from ``(b1, b2)`` point of view.
120 `rangeb` from ``(b1, b2)`` point of view.
121
121
122 Return `filteredblocks, rangea` where:
122 Return `filteredblocks, rangea` where:
123
123
124 * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
124 * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
125 `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
125 `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
126 block ``(b1, b2)`` being inside `rangeb` if
126 block ``(b1, b2)`` being inside `rangeb` if
127 ``rangeb[0] < b2 and b1 < rangeb[1]``;
127 ``rangeb[0] < b2 and b1 < rangeb[1]``;
128 * `rangea` is the line range w.r.t. to ``(a1, a2)`` parts of `blocks`.
128 * `rangea` is the line range w.r.t. to ``(a1, a2)`` parts of `blocks`.
129 """
129 """
130 lbb, ubb = rangeb
130 lbb, ubb = rangeb
131 lba, uba = None, None
131 lba, uba = None, None
132 filteredblocks = []
132 filteredblocks = []
133 for block in blocks:
133 for block in blocks:
134 (a1, a2, b1, b2), stype = block
134 (a1, a2, b1, b2), stype = block
135 if lbb >= b1 and ubb <= b2 and stype == '=':
135 if lbb >= b1 and ubb <= b2 and stype == '=':
136 # rangeb is within a single "=" hunk, restrict back linerange1
136 # rangeb is within a single "=" hunk, restrict back linerange1
137 # by offsetting rangeb
137 # by offsetting rangeb
138 lba = lbb - b1 + a1
138 lba = lbb - b1 + a1
139 uba = ubb - b1 + a1
139 uba = ubb - b1 + a1
140 else:
140 else:
141 if b1 <= lbb < b2:
141 if b1 <= lbb < b2:
142 if stype == '=':
142 if stype == '=':
143 lba = a2 - (b2 - lbb)
143 lba = a2 - (b2 - lbb)
144 else:
144 else:
145 lba = a1
145 lba = a1
146 if b1 < ubb <= b2:
146 if b1 < ubb <= b2:
147 if stype == '=':
147 if stype == '=':
148 uba = a1 + (ubb - b1)
148 uba = a1 + (ubb - b1)
149 else:
149 else:
150 uba = a2
150 uba = a2
151 if lbb < b2 and b1 < ubb:
151 if lbb < b2 and b1 < ubb:
152 filteredblocks.append(block)
152 filteredblocks.append(block)
153 if lba is None or uba is None or uba < lba:
153 if lba is None or uba is None or uba < lba:
154 raise error.Abort(_('line range exceeds file size'))
154 raise error.Abort(_('line range exceeds file size'))
155 return filteredblocks, (lba, uba)
155 return filteredblocks, (lba, uba)
156
156
157 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
157 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
158 """Return (block, type) tuples, where block is an mdiff.blocks
158 """Return (block, type) tuples, where block is an mdiff.blocks
159 line entry. type is '=' for blocks matching exactly one another
159 line entry. type is '=' for blocks matching exactly one another
160 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
160 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
161 matching only after having filtered blank lines.
161 matching only after having filtered blank lines.
162 line1 and line2 are text1 and text2 split with splitnewlines() if
162 line1 and line2 are text1 and text2 split with splitnewlines() if
163 they are already available.
163 they are already available.
164 """
164 """
165 if opts is None:
165 if opts is None:
166 opts = defaultopts
166 opts = defaultopts
167 if opts.ignorews or opts.ignorewsamount:
167 if opts.ignorews or opts.ignorewsamount:
168 text1 = wsclean(opts, text1, False)
168 text1 = wsclean(opts, text1, False)
169 text2 = wsclean(opts, text2, False)
169 text2 = wsclean(opts, text2, False)
170 diff = bdiff.blocks(text1, text2)
170 diff = bdiff.blocks(text1, text2)
171 for i, s1 in enumerate(diff):
171 for i, s1 in enumerate(diff):
172 # The first match is special.
172 # The first match is special.
173 # we've either found a match starting at line 0 or a match later
173 # we've either found a match starting at line 0 or a match later
174 # in the file. If it starts later, old and new below will both be
174 # in the file. If it starts later, old and new below will both be
175 # empty and we'll continue to the next match.
175 # empty and we'll continue to the next match.
176 if i > 0:
176 if i > 0:
177 s = diff[i - 1]
177 s = diff[i - 1]
178 else:
178 else:
179 s = [0, 0, 0, 0]
179 s = [0, 0, 0, 0]
180 s = [s[1], s1[0], s[3], s1[2]]
180 s = [s[1], s1[0], s[3], s1[2]]
181
181
182 # bdiff sometimes gives huge matches past eof, this check eats them,
182 # bdiff sometimes gives huge matches past eof, this check eats them,
183 # and deals with the special first match case described above
183 # and deals with the special first match case described above
184 if s[0] != s[1] or s[2] != s[3]:
184 if s[0] != s[1] or s[2] != s[3]:
185 type = '!'
185 type = '!'
186 if opts.ignoreblanklines:
186 if opts.ignoreblanklines:
187 if lines1 is None:
187 if lines1 is None:
188 lines1 = splitnewlines(text1)
188 lines1 = splitnewlines(text1)
189 if lines2 is None:
189 if lines2 is None:
190 lines2 = splitnewlines(text2)
190 lines2 = splitnewlines(text2)
191 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
191 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
192 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
192 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
193 if old == new:
193 if old == new:
194 type = '~'
194 type = '~'
195 yield s, type
195 yield s, type
196 yield s1, '='
196 yield s1, '='
197
197
198 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
198 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
199 """Return a unified diff as a (headers, hunkstext) tuple.
199 """Return a unified diff as a (headers, hunks) tuple.
200
200
201 If the diff is not null, `headers` is a list with unified diff header
201 If the diff is not null, `headers` is a list with unified diff header
202 lines "--- <original>" and "+++ <new>" and `hunkstext` is a string
202 lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
203 containing diff hunks. Otherwise, both `headers` and `hunkstext` are
203 (hunkrange, hunklines) coming from _unidiff().
204 empty.
204 Otherwise, `headers` and `hunks` are empty.
205 """
205 """
206 def datetag(date, fn=None):
206 def datetag(date, fn=None):
207 if not opts.git and not opts.nodates:
207 if not opts.git and not opts.nodates:
208 return '\t%s' % date
208 return '\t%s' % date
209 if fn and ' ' in fn:
209 if fn and ' ' in fn:
210 return '\t'
210 return '\t'
211 return ''
211 return ''
212
212
213 sentinel = [], ""
213 sentinel = [], ()
214 if not a and not b:
214 if not a and not b:
215 return sentinel
215 return sentinel
216
216
217 if opts.noprefix:
217 if opts.noprefix:
218 aprefix = bprefix = ''
218 aprefix = bprefix = ''
219 else:
219 else:
220 aprefix = 'a/'
220 aprefix = 'a/'
221 bprefix = 'b/'
221 bprefix = 'b/'
222
222
223 epoch = util.datestr((0, 0))
223 epoch = util.datestr((0, 0))
224
224
225 fn1 = util.pconvert(fn1)
225 fn1 = util.pconvert(fn1)
226 fn2 = util.pconvert(fn2)
226 fn2 = util.pconvert(fn2)
227
227
228 def checknonewline(lines):
228 def checknonewline(lines):
229 for text in lines:
229 for text in lines:
230 if text[-1] != '\n':
230 if text[-1] != '\n':
231 text += "\n\ No newline at end of file\n"
231 text += "\n\ No newline at end of file\n"
232 yield text
232 yield text
233
233
234 if not opts.text and (util.binary(a) or util.binary(b)):
234 if not opts.text and (util.binary(a) or util.binary(b)):
235 if a and b and len(a) == len(b) and a == b:
235 if a and b and len(a) == len(b) and a == b:
236 return sentinel
236 return sentinel
237 headerlines = []
237 headerlines = []
238 l = ['Binary file %s has changed\n' % fn1]
238 hunks = (None, ['Binary file %s has changed\n' % fn1]),
239 elif not a:
239 elif not a:
240 b = splitnewlines(b)
240 b = splitnewlines(b)
241 if a is None:
241 if a is None:
242 l1 = '--- /dev/null%s' % datetag(epoch)
242 l1 = '--- /dev/null%s' % datetag(epoch)
243 else:
243 else:
244 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
244 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
245 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
245 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
246 headerlines = [l1, l2]
246 headerlines = [l1, l2]
247 l = ["@@ -0,0 +1,%d @@\n" % len(b)] + ["+" + e for e in b]
247 size = len(b)
248 hunkrange = (0, 0, 1, size)
249 hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
250 hunks = (hunkrange, checknonewline(hunklines)),
248 elif not b:
251 elif not b:
249 a = splitnewlines(a)
252 a = splitnewlines(a)
250 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
253 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
251 if b is None:
254 if b is None:
252 l2 = '+++ /dev/null%s' % datetag(epoch)
255 l2 = '+++ /dev/null%s' % datetag(epoch)
253 else:
256 else:
254 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
257 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
255 headerlines = [l1, l2]
258 headerlines = [l1, l2]
256 l = ["@@ -1,%d +0,0 @@\n" % len(a)] + ["-" + e for e in a]
259 size = len(a)
260 hunkrange = (1, size, 0, 0)
261 hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
262 hunks = (hunkrange, checknonewline(hunklines)),
257 else:
263 else:
258 l = sum((hlines for hrange, hlines in _unidiff(a, b, opts=opts)), [])
264 diffhunks = _unidiff(a, b, opts=opts)
259 if not l:
265 try:
266 hunkrange, hunklines = next(diffhunks)
267 except StopIteration:
260 return sentinel
268 return sentinel
261
269
262 headerlines = [
270 headerlines = [
263 "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
271 "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
264 "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
272 "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
265 ]
273 ]
274 def rewindhunks():
275 yield hunkrange, checknonewline(hunklines)
276 for hr, hl in diffhunks:
277 yield hr, checknonewline(hl)
266
278
267 return headerlines, "".join(checknonewline(l))
279 hunks = rewindhunks()
280
281 return headerlines, hunks
268
282
269 def _unidiff(t1, t2, opts=defaultopts):
283 def _unidiff(t1, t2, opts=defaultopts):
270 """Yield hunks of a headerless unified diff from t1 and t2 texts.
284 """Yield hunks of a headerless unified diff from t1 and t2 texts.
271
285
272 Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
286 Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
273 tuple (s1, l1, s2, l2) representing the range information of the hunk to
287 tuple (s1, l1, s2, l2) representing the range information of the hunk to
274 form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
288 form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
275 of the hunk combining said header followed by line additions and
289 of the hunk combining said header followed by line additions and
276 deletions.
290 deletions.
277 """
291 """
278 l1 = splitnewlines(t1)
292 l1 = splitnewlines(t1)
279 l2 = splitnewlines(t2)
293 l2 = splitnewlines(t2)
280 def contextend(l, len):
294 def contextend(l, len):
281 ret = l + opts.context
295 ret = l + opts.context
282 if ret > len:
296 if ret > len:
283 ret = len
297 ret = len
284 return ret
298 return ret
285
299
286 def contextstart(l):
300 def contextstart(l):
287 ret = l - opts.context
301 ret = l - opts.context
288 if ret < 0:
302 if ret < 0:
289 return 0
303 return 0
290 return ret
304 return ret
291
305
292 lastfunc = [0, '']
306 lastfunc = [0, '']
293 def yieldhunk(hunk):
307 def yieldhunk(hunk):
294 (astart, a2, bstart, b2, delta) = hunk
308 (astart, a2, bstart, b2, delta) = hunk
295 aend = contextend(a2, len(l1))
309 aend = contextend(a2, len(l1))
296 alen = aend - astart
310 alen = aend - astart
297 blen = b2 - bstart + aend - a2
311 blen = b2 - bstart + aend - a2
298
312
299 func = ""
313 func = ""
300 if opts.showfunc:
314 if opts.showfunc:
301 lastpos, func = lastfunc
315 lastpos, func = lastfunc
302 # walk backwards from the start of the context up to the start of
316 # walk backwards from the start of the context up to the start of
303 # the previous hunk context until we find a line starting with an
317 # the previous hunk context until we find a line starting with an
304 # alphanumeric char.
318 # alphanumeric char.
305 for i in xrange(astart - 1, lastpos - 1, -1):
319 for i in xrange(astart - 1, lastpos - 1, -1):
306 if l1[i][0].isalnum():
320 if l1[i][0].isalnum():
307 func = ' ' + l1[i].rstrip()[:40]
321 func = ' ' + l1[i].rstrip()[:40]
308 lastfunc[1] = func
322 lastfunc[1] = func
309 break
323 break
310 # by recording this hunk's starting point as the next place to
324 # by recording this hunk's starting point as the next place to
311 # start looking for function lines, we avoid reading any line in
325 # start looking for function lines, we avoid reading any line in
312 # the file more than once.
326 # the file more than once.
313 lastfunc[0] = astart
327 lastfunc[0] = astart
314
328
315 # zero-length hunk ranges report their start line as one less
329 # zero-length hunk ranges report their start line as one less
316 if alen:
330 if alen:
317 astart += 1
331 astart += 1
318 if blen:
332 if blen:
319 bstart += 1
333 bstart += 1
320
334
321 hunkrange = astart, alen, bstart, blen
335 hunkrange = astart, alen, bstart, blen
322 hunklines = (
336 hunklines = (
323 ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
337 ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
324 + delta
338 + delta
325 + [' ' + l1[x] for x in xrange(a2, aend)]
339 + [' ' + l1[x] for x in xrange(a2, aend)]
326 )
340 )
327 yield hunkrange, hunklines
341 yield hunkrange, hunklines
328
342
329 # bdiff.blocks gives us the matching sequences in the files. The loop
343 # bdiff.blocks gives us the matching sequences in the files. The loop
330 # below finds the spaces between those matching sequences and translates
344 # below finds the spaces between those matching sequences and translates
331 # them into diff output.
345 # them into diff output.
332 #
346 #
333 hunk = None
347 hunk = None
334 ignoredlines = 0
348 ignoredlines = 0
335 for s, stype in allblocks(t1, t2, opts, l1, l2):
349 for s, stype in allblocks(t1, t2, opts, l1, l2):
336 a1, a2, b1, b2 = s
350 a1, a2, b1, b2 = s
337 if stype != '!':
351 if stype != '!':
338 if stype == '~':
352 if stype == '~':
339 # The diff context lines are based on t1 content. When
353 # The diff context lines are based on t1 content. When
340 # blank lines are ignored, the new lines offsets must
354 # blank lines are ignored, the new lines offsets must
341 # be adjusted as if equivalent blocks ('~') had the
355 # be adjusted as if equivalent blocks ('~') had the
342 # same sizes on both sides.
356 # same sizes on both sides.
343 ignoredlines += (b2 - b1) - (a2 - a1)
357 ignoredlines += (b2 - b1) - (a2 - a1)
344 continue
358 continue
345 delta = []
359 delta = []
346 old = l1[a1:a2]
360 old = l1[a1:a2]
347 new = l2[b1:b2]
361 new = l2[b1:b2]
348
362
349 b1 -= ignoredlines
363 b1 -= ignoredlines
350 b2 -= ignoredlines
364 b2 -= ignoredlines
351 astart = contextstart(a1)
365 astart = contextstart(a1)
352 bstart = contextstart(b1)
366 bstart = contextstart(b1)
353 prev = None
367 prev = None
354 if hunk:
368 if hunk:
355 # join with the previous hunk if it falls inside the context
369 # join with the previous hunk if it falls inside the context
356 if astart < hunk[1] + opts.context + 1:
370 if astart < hunk[1] + opts.context + 1:
357 prev = hunk
371 prev = hunk
358 astart = hunk[1]
372 astart = hunk[1]
359 bstart = hunk[3]
373 bstart = hunk[3]
360 else:
374 else:
361 for x in yieldhunk(hunk):
375 for x in yieldhunk(hunk):
362 yield x
376 yield x
363 if prev:
377 if prev:
364 # we've joined the previous hunk, record the new ending points.
378 # we've joined the previous hunk, record the new ending points.
365 hunk[1] = a2
379 hunk[1] = a2
366 hunk[3] = b2
380 hunk[3] = b2
367 delta = hunk[4]
381 delta = hunk[4]
368 else:
382 else:
369 # create a new hunk
383 # create a new hunk
370 hunk = [astart, a2, bstart, b2, delta]
384 hunk = [astart, a2, bstart, b2, delta]
371
385
372 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
386 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
373 delta[len(delta):] = ['-' + x for x in old]
387 delta[len(delta):] = ['-' + x for x in old]
374 delta[len(delta):] = ['+' + x for x in new]
388 delta[len(delta):] = ['+' + x for x in new]
375
389
376 if hunk:
390 if hunk:
377 for x in yieldhunk(hunk):
391 for x in yieldhunk(hunk):
378 yield x
392 yield x
379
393
380 def b85diff(to, tn):
394 def b85diff(to, tn):
381 '''print base85-encoded binary diff'''
395 '''print base85-encoded binary diff'''
382 def fmtline(line):
396 def fmtline(line):
383 l = len(line)
397 l = len(line)
384 if l <= 26:
398 if l <= 26:
385 l = chr(ord('A') + l - 1)
399 l = chr(ord('A') + l - 1)
386 else:
400 else:
387 l = chr(l - 26 + ord('a') - 1)
401 l = chr(l - 26 + ord('a') - 1)
388 return '%c%s\n' % (l, base85.b85encode(line, True))
402 return '%c%s\n' % (l, base85.b85encode(line, True))
389
403
390 def chunk(text, csize=52):
404 def chunk(text, csize=52):
391 l = len(text)
405 l = len(text)
392 i = 0
406 i = 0
393 while i < l:
407 while i < l:
394 yield text[i:i + csize]
408 yield text[i:i + csize]
395 i += csize
409 i += csize
396
410
397 if to is None:
411 if to is None:
398 to = ''
412 to = ''
399 if tn is None:
413 if tn is None:
400 tn = ''
414 tn = ''
401
415
402 if to == tn:
416 if to == tn:
403 return ''
417 return ''
404
418
405 # TODO: deltas
419 # TODO: deltas
406 ret = []
420 ret = []
407 ret.append('GIT binary patch\n')
421 ret.append('GIT binary patch\n')
408 ret.append('literal %s\n' % len(tn))
422 ret.append('literal %s\n' % len(tn))
409 for l in chunk(zlib.compress(tn)):
423 for l in chunk(zlib.compress(tn)):
410 ret.append(fmtline(l))
424 ret.append(fmtline(l))
411 ret.append('\n')
425 ret.append('\n')
412
426
413 return ''.join(ret)
427 return ''.join(ret)
414
428
415 def patchtext(bin):
429 def patchtext(bin):
416 pos = 0
430 pos = 0
417 t = []
431 t = []
418 while pos < len(bin):
432 while pos < len(bin):
419 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
433 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
420 pos += 12
434 pos += 12
421 t.append(bin[pos:pos + l])
435 t.append(bin[pos:pos + l])
422 pos += l
436 pos += l
423 return "".join(t)
437 return "".join(t)
424
438
425 def patch(a, bin):
439 def patch(a, bin):
426 if len(a) == 0:
440 if len(a) == 0:
427 # skip over trivial delta header
441 # skip over trivial delta header
428 return util.buffer(bin, 12)
442 return util.buffer(bin, 12)
429 return mpatch.patches(a, [bin])
443 return mpatch.patches(a, [bin])
430
444
431 # similar to difflib.SequenceMatcher.get_matching_blocks
445 # similar to difflib.SequenceMatcher.get_matching_blocks
432 def get_matching_blocks(a, b):
446 def get_matching_blocks(a, b):
433 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
447 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
434
448
435 def trivialdiffheader(length):
449 def trivialdiffheader(length):
436 return struct.pack(">lll", 0, 0, length) if length else ''
450 return struct.pack(">lll", 0, 0, length) if length else ''
437
451
438 def replacediffheader(oldlen, newlen):
452 def replacediffheader(oldlen, newlen):
439 return struct.pack(">lll", 0, oldlen, newlen)
453 return struct.pack(">lll", 0, oldlen, newlen)
440
454
441 patches = mpatch.patches
455 patches = mpatch.patches
442 patchedsize = mpatch.patchedsize
456 patchedsize = mpatch.patchedsize
443 textdiff = bdiff.bdiff
457 textdiff = bdiff.bdiff
@@ -1,2657 +1,2658 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43 stringio = util.stringio
43 stringio = util.stringio
44
44
45 gitre = re.compile('diff --git a/(.*) b/(.*)')
45 gitre = re.compile('diff --git a/(.*) b/(.*)')
46 tabsplitter = re.compile(r'(\t+|[^\t]+)')
46 tabsplitter = re.compile(r'(\t+|[^\t]+)')
47
47
48 class PatchError(Exception):
48 class PatchError(Exception):
49 pass
49 pass
50
50
51
51
52 # public functions
52 # public functions
53
53
54 def split(stream):
54 def split(stream):
55 '''return an iterator of individual patches from a stream'''
55 '''return an iterator of individual patches from a stream'''
56 def isheader(line, inheader):
56 def isheader(line, inheader):
57 if inheader and line[0] in (' ', '\t'):
57 if inheader and line[0] in (' ', '\t'):
58 # continuation
58 # continuation
59 return True
59 return True
60 if line[0] in (' ', '-', '+'):
60 if line[0] in (' ', '-', '+'):
61 # diff line - don't check for header pattern in there
61 # diff line - don't check for header pattern in there
62 return False
62 return False
63 l = line.split(': ', 1)
63 l = line.split(': ', 1)
64 return len(l) == 2 and ' ' not in l[0]
64 return len(l) == 2 and ' ' not in l[0]
65
65
66 def chunk(lines):
66 def chunk(lines):
67 return stringio(''.join(lines))
67 return stringio(''.join(lines))
68
68
69 def hgsplit(stream, cur):
69 def hgsplit(stream, cur):
70 inheader = True
70 inheader = True
71
71
72 for line in stream:
72 for line in stream:
73 if not line.strip():
73 if not line.strip():
74 inheader = False
74 inheader = False
75 if not inheader and line.startswith('# HG changeset patch'):
75 if not inheader and line.startswith('# HG changeset patch'):
76 yield chunk(cur)
76 yield chunk(cur)
77 cur = []
77 cur = []
78 inheader = True
78 inheader = True
79
79
80 cur.append(line)
80 cur.append(line)
81
81
82 if cur:
82 if cur:
83 yield chunk(cur)
83 yield chunk(cur)
84
84
85 def mboxsplit(stream, cur):
85 def mboxsplit(stream, cur):
86 for line in stream:
86 for line in stream:
87 if line.startswith('From '):
87 if line.startswith('From '):
88 for c in split(chunk(cur[1:])):
88 for c in split(chunk(cur[1:])):
89 yield c
89 yield c
90 cur = []
90 cur = []
91
91
92 cur.append(line)
92 cur.append(line)
93
93
94 if cur:
94 if cur:
95 for c in split(chunk(cur[1:])):
95 for c in split(chunk(cur[1:])):
96 yield c
96 yield c
97
97
98 def mimesplit(stream, cur):
98 def mimesplit(stream, cur):
99 def msgfp(m):
99 def msgfp(m):
100 fp = stringio()
100 fp = stringio()
101 g = email.Generator.Generator(fp, mangle_from_=False)
101 g = email.Generator.Generator(fp, mangle_from_=False)
102 g.flatten(m)
102 g.flatten(m)
103 fp.seek(0)
103 fp.seek(0)
104 return fp
104 return fp
105
105
106 for line in stream:
106 for line in stream:
107 cur.append(line)
107 cur.append(line)
108 c = chunk(cur)
108 c = chunk(cur)
109
109
110 m = email.Parser.Parser().parse(c)
110 m = email.Parser.Parser().parse(c)
111 if not m.is_multipart():
111 if not m.is_multipart():
112 yield msgfp(m)
112 yield msgfp(m)
113 else:
113 else:
114 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
115 for part in m.walk():
115 for part in m.walk():
116 ct = part.get_content_type()
116 ct = part.get_content_type()
117 if ct not in ok_types:
117 if ct not in ok_types:
118 continue
118 continue
119 yield msgfp(part)
119 yield msgfp(part)
120
120
121 def headersplit(stream, cur):
121 def headersplit(stream, cur):
122 inheader = False
122 inheader = False
123
123
124 for line in stream:
124 for line in stream:
125 if not inheader and isheader(line, inheader):
125 if not inheader and isheader(line, inheader):
126 yield chunk(cur)
126 yield chunk(cur)
127 cur = []
127 cur = []
128 inheader = True
128 inheader = True
129 if inheader and not isheader(line, inheader):
129 if inheader and not isheader(line, inheader):
130 inheader = False
130 inheader = False
131
131
132 cur.append(line)
132 cur.append(line)
133
133
134 if cur:
134 if cur:
135 yield chunk(cur)
135 yield chunk(cur)
136
136
137 def remainder(cur):
137 def remainder(cur):
138 yield chunk(cur)
138 yield chunk(cur)
139
139
    class fiter(object):
        """Iterator adapter adding a (Python 2 style) next() method to a
        file-like object that only provides readline() -- http responses,
        for example, have readline but not next (see use below)."""
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                # readline() returns '' at EOF; end iteration there.
                raise StopIteration
            return l
152
152
153 inheader = False
153 inheader = False
154 cur = []
154 cur = []
155
155
156 mimeheaders = ['content-type']
156 mimeheaders = ['content-type']
157
157
158 if not util.safehasattr(stream, 'next'):
158 if not util.safehasattr(stream, 'next'):
159 # http responses, for example, have readline but not next
159 # http responses, for example, have readline but not next
160 stream = fiter(stream)
160 stream = fiter(stream)
161
161
162 for line in stream:
162 for line in stream:
163 cur.append(line)
163 cur.append(line)
164 if line.startswith('# HG changeset patch'):
164 if line.startswith('# HG changeset patch'):
165 return hgsplit(stream, cur)
165 return hgsplit(stream, cur)
166 elif line.startswith('From '):
166 elif line.startswith('From '):
167 return mboxsplit(stream, cur)
167 return mboxsplit(stream, cur)
168 elif isheader(line, inheader):
168 elif isheader(line, inheader):
169 inheader = True
169 inheader = True
170 if line.split(':', 1)[0].lower() in mimeheaders:
170 if line.split(':', 1)[0].lower() in mimeheaders:
171 # let email parser handle this
171 # let email parser handle this
172 return mimesplit(stream, cur)
172 return mimesplit(stream, cur)
173 elif line.startswith('--- ') and inheader:
173 elif line.startswith('--- ') and inheader:
174 # No evil headers seen by diff start, split by hand
174 # No evil headers seen by diff start, split by hand
175 return headersplit(stream, cur)
175 return headersplit(stream, cur)
176 # Not enough info, keep reading
176 # Not enough info, keep reading
177
177
178 # if we are here, we have a very plain patch
178 # if we are here, we have a very plain patch
179 return remainder(cur)
179 return remainder(cur)
180
180
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Each '# <header> <value>' line found in an hg patch header is stored
# under the corresponding key in the data dict built by extract().
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
187
187
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # Diff payloads are accumulated in this temp file; its name is
    # returned as data['filename'] when at least one diff was found.
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # Strip a leading '[PATCH ...]' tag from the subject.
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # Unfold continuation lines into a single-line subject.
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # This part contains a diff; text before the diff start
                # is scanned for hg patch-header metadata and commit
                # message content.
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # Other known '# Header value' lines; see
                            # patchheadermap above.
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            # First non-'#' line ends the hg header.
                            hgpatchheader = False
                    elif line == '---':
                        # Conventional patch separator: everything after
                        # it is not commit message material.
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # Plain-text part before any diff: append to the message.
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # No diff found: nothing to apply, clean up the temp file.
        os.unlink(tmpname)
    return data
307
307
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        # A freshly created entry is a plain modification until the
        # git header lines say otherwise.
        self.op = 'MODIFY'
        self.path = path
        self.oldpath = None
        self.mode = None
        self.binary = False

    def setmode(self, mode):
        # Keep only the two permission bits hg tracks: the symlink
        # bit and the owner-exec bit.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Shallow duplicate carrying over every attribute.
        dup = patchmeta(self.path)
        dup.oldpath, dup.mode = self.oldpath, self.mode
        dup.op, dup.binary = self.op, self.binary
        return dup

    def _ispatchinga(self, afile):
        # Does the diff's "a/" side name this entry's source file?
        if afile != '/dev/null':
            return ('a/' + (self.oldpath or self.path)) == afile
        # /dev/null as the old side only makes sense for an addition.
        return self.op == 'ADD'

    def _ispatchingb(self, bfile):
        # Does the diff's "b/" side name this entry's target file?
        if bfile != '/dev/null':
            return ('b/' + self.path) == bfile
        # /dev/null as the new side only makes sense for a deletion.
        return self.op == 'DELETE'

    def ispatching(self, afile, bfile):
        # Both sides of the diff header must match this entry.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
353
353
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per 'diff --git' section
    found in the line source 'lr'.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    # Previous entry had no '--- ' line (e.g. pure
                    # rename/copy/mode change); flush it now.
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # Start of the diff body: metadata for this file is
                # complete.
                gitpatches.append(gp)
                gp = None
                continue
            # Extended git header lines refine the current entry.
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # The last six characters are the octal file mode.
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        # Flush a trailing entry with no diff body.
        gitpatches.append(gp)

    return gitpatches
397
397
class linereader(object):
    # simple class to allow pushing lines back into the input stream:
    # pushed-back lines are returned (FIFO) before reading from the
    # underlying file again.
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Pushing None is a convenient no-op for callers holding a
        # "maybe" line.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # Iterate until readline() reports EOF with ''.
        return iter(self.readline, '')
417
417
class abstractbackend(object):
    """Interface that patch-application targets must implement."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for
        this file.
        """
        # Default is a no-op: rejects are only persisted by backends
        # that override this.

    def exists(self, fname):
        raise NotImplementedError
449
449
class fsbackend(abstractbackend):
    """Backend applying patches directly to the filesystem under
    'basedir', via a vfs rooted there."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def _join(self, f):
        # Absolute path of 'f' under the backend root.
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # Symlinks: data is the link target, flags mark a link.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # Missing file is fine here; read() below decides the
            # final outcome.
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # Missing/deleted file: signalled by data=None.
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Content unchanged: only update the flags.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # Rejects go next to the target file, with a .rej suffix.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
501
501
class workingbackend(fsbackend):
    """Backend patching the working directory of 'repo', keeping the
    dirstate in sync (adds, removes, copies, similarity detection)."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to touch files that exist but are unknown to the
        # dirstate.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush all recorded dirstate updates; returns the sorted list
        # of files this backend changed.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
545
545
class filestore(object):
    """Store patched file contents, in memory up to 'maxsize' bytes
    (default 4 MiB), spilling further files to a temporary directory."""

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        # maxsize < 0 means "never spill to disk".
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # Over budget: write to a lazily created temporary directory.
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        # Returns (data, mode, copied), or (None, None, None) when the
        # file is unknown to this store.
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # Remove the spill directory, if one was ever created.
        if self.opener:
            shutil.rmtree(self.opener.base)
582
582
class repobackend(abstractbackend):
    """Backend applying patches against changectx 'ctx', accumulating
    results into 'store' instead of touching the filesystem."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # Unknown file: signalled by (None, None).
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Flags-only change: keep the current content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # All files touched by this backend, changed or removed.
        return self.changed | self.removed
624
624
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns contain \d, \+ and \* which are invalid
# string escape sequences (DeprecationWarning on Python 3.6+).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# End-of-line handling modes accepted by patchfile.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
629
629
630 class patchfile(object):
630 class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # 'gp' is a patchmeta describing the file operation; content is
        # read from 'backend' (or from 'store' when this is a copy).
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # Copies read the source content from the store.
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Work on LF internally; writelines() restores the
                    # requested EOL on output.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                # File does not exist yet but the patch creates it.
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        # Hunk-application state: fuzzy-match hash, dirty flag, line
        # offset and skew accumulated while applying hunks, rejects.
        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0
686
686
    def writelines(self, fname, lines, mode):
        # Write 'lines' to 'fname' via the backend, restoring the EOL
        # style requested by self.eolmode ('auto' reuses the EOL
        # detected when the file was read).
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Internal lines are LF-terminated; convert on the way out.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704
704
705 def printfile(self, warn):
705 def printfile(self, warn):
706 if self.fileprinted:
706 if self.fileprinted:
707 return
707 return
708 if warn or self.ui.verbose:
708 if warn or self.ui.verbose:
709 self.fileprinted = True
709 self.fileprinted = True
710 s = _("patching file %s\n") % self.fname
710 s = _("patching file %s\n") % self.fname
711 if warn:
711 if warn:
712 self.ui.warn(s)
712 self.ui.warn(s)
713 else:
713 else:
714 self.ui.note(s)
714 self.ui.note(s)
715
715
716
716
717 def findlines(self, l, linenum):
717 def findlines(self, l, linenum):
718 # looks through the hash and finds candidate lines. The
718 # looks through the hash and finds candidate lines. The
719 # result is a list of line numbers sorted based on distance
719 # result is a list of line numbers sorted based on distance
720 # from linenum
720 # from linenum
721
721
722 cand = self.hash.get(l, [])
722 cand = self.hash.get(l, [])
723 if len(cand) > 1:
723 if len(cand) > 1:
724 # resort our list of potentials forward then back.
724 # resort our list of potentials forward then back.
725 cand.sort(key=lambda x: abs(x - linenum))
725 cand.sort(key=lambda x: abs(x - linenum))
726 return cand
726 return cand
727
727
728 def write_rej(self):
728 def write_rej(self):
729 # our rejects are a little different from patch(1). This always
729 # our rejects are a little different from patch(1). This always
730 # creates rejects in the same form as the original patch. A file
730 # creates rejects in the same form as the original patch. A file
731 # header is inserted so that you can run the reject through patch again
731 # header is inserted so that you can run the reject through patch again
732 # without having to type the filename.
732 # without having to type the filename.
733 if not self.rej:
733 if not self.rej:
734 return
734 return
735 base = os.path.basename(self.fname)
735 base = os.path.basename(self.fname)
736 lines = ["--- %s\n+++ %s\n" % (base, base)]
736 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 for x in self.rej:
737 for x in self.rej:
738 for l in x.hunk:
738 for l in x.hunk:
739 lines.append(l)
739 lines.append(l)
740 if l[-1] != '\n':
740 if l[-1] != '\n':
741 lines.append("\n\ No newline at end of file\n")
741 lines.append("\n\ No newline at end of file\n")
742 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
742 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743
743
744 def apply(self, h):
744 def apply(self, h):
745 if not h.complete():
745 if not h.complete():
746 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
746 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 (h.number, h.desc, len(h.a), h.lena, len(h.b),
747 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 h.lenb))
748 h.lenb))
749
749
750 self.hunks += 1
750 self.hunks += 1
751
751
752 if self.missing:
752 if self.missing:
753 self.rej.append(h)
753 self.rej.append(h)
754 return -1
754 return -1
755
755
756 if self.exists and self.create:
756 if self.exists and self.create:
757 if self.copysource:
757 if self.copysource:
758 self.ui.warn(_("cannot create %s: destination already "
758 self.ui.warn(_("cannot create %s: destination already "
759 "exists\n") % self.fname)
759 "exists\n") % self.fname)
760 else:
760 else:
761 self.ui.warn(_("file %s already exists\n") % self.fname)
761 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.rej.append(h)
762 self.rej.append(h)
763 return -1
763 return -1
764
764
765 if isinstance(h, binhunk):
765 if isinstance(h, binhunk):
766 if self.remove:
766 if self.remove:
767 self.backend.unlink(self.fname)
767 self.backend.unlink(self.fname)
768 else:
768 else:
769 l = h.new(self.lines)
769 l = h.new(self.lines)
770 self.lines[:] = l
770 self.lines[:] = l
771 self.offset += len(l)
771 self.offset += len(l)
772 self.dirty = True
772 self.dirty = True
773 return 0
773 return 0
774
774
775 horig = h
775 horig = h
776 if (self.eolmode in ('crlf', 'lf')
776 if (self.eolmode in ('crlf', 'lf')
777 or self.eolmode == 'auto' and self.eol):
777 or self.eolmode == 'auto' and self.eol):
778 # If new eols are going to be normalized, then normalize
778 # If new eols are going to be normalized, then normalize
779 # hunk data before patching. Otherwise, preserve input
779 # hunk data before patching. Otherwise, preserve input
780 # line-endings.
780 # line-endings.
781 h = h.getnormalized()
781 h = h.getnormalized()
782
782
783 # fast case first, no offsets, no fuzz
783 # fast case first, no offsets, no fuzz
784 old, oldstart, new, newstart = h.fuzzit(0, False)
784 old, oldstart, new, newstart = h.fuzzit(0, False)
785 oldstart += self.offset
785 oldstart += self.offset
786 orig_start = oldstart
786 orig_start = oldstart
787 # if there's skew we want to emit the "(offset %d lines)" even
787 # if there's skew we want to emit the "(offset %d lines)" even
788 # when the hunk cleanly applies at start + skew, so skip the
788 # when the hunk cleanly applies at start + skew, so skip the
789 # fast case code
789 # fast case code
790 if (self.skew == 0 and
790 if (self.skew == 0 and
791 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
791 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 if self.remove:
792 if self.remove:
793 self.backend.unlink(self.fname)
793 self.backend.unlink(self.fname)
794 else:
794 else:
795 self.lines[oldstart:oldstart + len(old)] = new
795 self.lines[oldstart:oldstart + len(old)] = new
796 self.offset += len(new) - len(old)
796 self.offset += len(new) - len(old)
797 self.dirty = True
797 self.dirty = True
798 return 0
798 return 0
799
799
800 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
800 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 self.hash = {}
801 self.hash = {}
802 for x, s in enumerate(self.lines):
802 for x, s in enumerate(self.lines):
803 self.hash.setdefault(s, []).append(x)
803 self.hash.setdefault(s, []).append(x)
804
804
805 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
805 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
806 for toponly in [True, False]:
806 for toponly in [True, False]:
807 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
807 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 oldstart = oldstart + self.offset + self.skew
808 oldstart = oldstart + self.offset + self.skew
809 oldstart = min(oldstart, len(self.lines))
809 oldstart = min(oldstart, len(self.lines))
810 if old:
810 if old:
811 cand = self.findlines(old[0][1:], oldstart)
811 cand = self.findlines(old[0][1:], oldstart)
812 else:
812 else:
813 # Only adding lines with no or fuzzed context, just
813 # Only adding lines with no or fuzzed context, just
814 # take the skew in account
814 # take the skew in account
815 cand = [oldstart]
815 cand = [oldstart]
816
816
817 for l in cand:
817 for l in cand:
818 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
818 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 self.lines[l : l + len(old)] = new
819 self.lines[l : l + len(old)] = new
820 self.offset += len(new) - len(old)
820 self.offset += len(new) - len(old)
821 self.skew = l - orig_start
821 self.skew = l - orig_start
822 self.dirty = True
822 self.dirty = True
823 offset = l - orig_start - fuzzlen
823 offset = l - orig_start - fuzzlen
824 if fuzzlen:
824 if fuzzlen:
825 msg = _("Hunk #%d succeeded at %d "
825 msg = _("Hunk #%d succeeded at %d "
826 "with fuzz %d "
826 "with fuzz %d "
827 "(offset %d lines).\n")
827 "(offset %d lines).\n")
828 self.printfile(True)
828 self.printfile(True)
829 self.ui.warn(msg %
829 self.ui.warn(msg %
830 (h.number, l + 1, fuzzlen, offset))
830 (h.number, l + 1, fuzzlen, offset))
831 else:
831 else:
832 msg = _("Hunk #%d succeeded at %d "
832 msg = _("Hunk #%d succeeded at %d "
833 "(offset %d lines).\n")
833 "(offset %d lines).\n")
834 self.ui.note(msg % (h.number, l + 1, offset))
834 self.ui.note(msg % (h.number, l + 1, offset))
835 return fuzzlen
835 return fuzzlen
836 self.printfile(True)
836 self.printfile(True)
837 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
837 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.rej.append(horig)
838 self.rej.append(horig)
839 return -1
839 return -1
840
840
841 def close(self):
841 def close(self):
842 if self.dirty:
842 if self.dirty:
843 self.writelines(self.fname, self.lines, self.mode)
843 self.writelines(self.fname, self.lines, self.mode)
844 self.write_rej()
844 self.write_rej()
845 return len(self.rej)
845 return len(self.rej)
846
846
847 class header(object):
847 class header(object):
848 """patch header
848 """patch header
849 """
849 """
850 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
850 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diff_re = re.compile('diff -r .* (.*)$')
851 diff_re = re.compile('diff -r .* (.*)$')
852 allhunks_re = re.compile('(?:index|deleted file) ')
852 allhunks_re = re.compile('(?:index|deleted file) ')
853 pretty_re = re.compile('(?:new file|deleted file) ')
853 pretty_re = re.compile('(?:new file|deleted file) ')
854 special_re = re.compile('(?:index|deleted|copy|rename) ')
854 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 newfile_re = re.compile('(?:new file)')
855 newfile_re = re.compile('(?:new file)')
856
856
857 def __init__(self, header):
857 def __init__(self, header):
858 self.header = header
858 self.header = header
859 self.hunks = []
859 self.hunks = []
860
860
861 def binary(self):
861 def binary(self):
862 return any(h.startswith('index ') for h in self.header)
862 return any(h.startswith('index ') for h in self.header)
863
863
864 def pretty(self, fp):
864 def pretty(self, fp):
865 for h in self.header:
865 for h in self.header:
866 if h.startswith('index '):
866 if h.startswith('index '):
867 fp.write(_('this modifies a binary file (all or nothing)\n'))
867 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 break
868 break
869 if self.pretty_re.match(h):
869 if self.pretty_re.match(h):
870 fp.write(h)
870 fp.write(h)
871 if self.binary():
871 if self.binary():
872 fp.write(_('this is a binary file\n'))
872 fp.write(_('this is a binary file\n'))
873 break
873 break
874 if h.startswith('---'):
874 if h.startswith('---'):
875 fp.write(_('%d hunks, %d lines changed\n') %
875 fp.write(_('%d hunks, %d lines changed\n') %
876 (len(self.hunks),
876 (len(self.hunks),
877 sum([max(h.added, h.removed) for h in self.hunks])))
877 sum([max(h.added, h.removed) for h in self.hunks])))
878 break
878 break
879 fp.write(h)
879 fp.write(h)
880
880
881 def write(self, fp):
881 def write(self, fp):
882 fp.write(''.join(self.header))
882 fp.write(''.join(self.header))
883
883
884 def allhunks(self):
884 def allhunks(self):
885 return any(self.allhunks_re.match(h) for h in self.header)
885 return any(self.allhunks_re.match(h) for h in self.header)
886
886
887 def files(self):
887 def files(self):
888 match = self.diffgit_re.match(self.header[0])
888 match = self.diffgit_re.match(self.header[0])
889 if match:
889 if match:
890 fromfile, tofile = match.groups()
890 fromfile, tofile = match.groups()
891 if fromfile == tofile:
891 if fromfile == tofile:
892 return [fromfile]
892 return [fromfile]
893 return [fromfile, tofile]
893 return [fromfile, tofile]
894 else:
894 else:
895 return self.diff_re.match(self.header[0]).groups()
895 return self.diff_re.match(self.header[0]).groups()
896
896
897 def filename(self):
897 def filename(self):
898 return self.files()[-1]
898 return self.files()[-1]
899
899
900 def __repr__(self):
900 def __repr__(self):
901 return '<header %s>' % (' '.join(map(repr, self.files())))
901 return '<header %s>' % (' '.join(map(repr, self.files())))
902
902
903 def isnewfile(self):
903 def isnewfile(self):
904 return any(self.newfile_re.match(h) for h in self.header)
904 return any(self.newfile_re.match(h) for h in self.header)
905
905
906 def special(self):
906 def special(self):
907 # Special files are shown only at the header level and not at the hunk
907 # Special files are shown only at the header level and not at the hunk
908 # level for example a file that has been deleted is a special file.
908 # level for example a file that has been deleted is a special file.
909 # The user cannot change the content of the operation, in the case of
909 # The user cannot change the content of the operation, in the case of
910 # the deleted file he has to take the deletion or not take it, he
910 # the deleted file he has to take the deletion or not take it, he
911 # cannot take some of it.
911 # cannot take some of it.
912 # Newly added files are special if they are empty, they are not special
912 # Newly added files are special if they are empty, they are not special
913 # if they have some content as we want to be able to change it
913 # if they have some content as we want to be able to change it
914 nocontent = len(self.header) == 2
914 nocontent = len(self.header) == 2
915 emptynewfile = self.isnewfile() and nocontent
915 emptynewfile = self.isnewfile() and nocontent
916 return emptynewfile or \
916 return emptynewfile or \
917 any(self.special_re.match(h) for h in self.header)
917 any(self.special_re.match(h) for h in self.header)
918
918
919 class recordhunk(object):
919 class recordhunk(object):
920 """patch hunk
920 """patch hunk
921
921
922 XXX shouldn't we merge this with the other hunk class?
922 XXX shouldn't we merge this with the other hunk class?
923 """
923 """
924 maxcontext = 3
924 maxcontext = 3
925
925
926 def __init__(self, header, fromline, toline, proc, before, hunk, after):
926 def __init__(self, header, fromline, toline, proc, before, hunk, after):
927 def trimcontext(number, lines):
927 def trimcontext(number, lines):
928 delta = len(lines) - self.maxcontext
928 delta = len(lines) - self.maxcontext
929 if False and delta > 0:
929 if False and delta > 0:
930 return number + delta, lines[:self.maxcontext]
930 return number + delta, lines[:self.maxcontext]
931 return number, lines
931 return number, lines
932
932
933 self.header = header
933 self.header = header
934 self.fromline, self.before = trimcontext(fromline, before)
934 self.fromline, self.before = trimcontext(fromline, before)
935 self.toline, self.after = trimcontext(toline, after)
935 self.toline, self.after = trimcontext(toline, after)
936 self.proc = proc
936 self.proc = proc
937 self.hunk = hunk
937 self.hunk = hunk
938 self.added, self.removed = self.countchanges(self.hunk)
938 self.added, self.removed = self.countchanges(self.hunk)
939
939
940 def __eq__(self, v):
940 def __eq__(self, v):
941 if not isinstance(v, recordhunk):
941 if not isinstance(v, recordhunk):
942 return False
942 return False
943
943
944 return ((v.hunk == self.hunk) and
944 return ((v.hunk == self.hunk) and
945 (v.proc == self.proc) and
945 (v.proc == self.proc) and
946 (self.fromline == v.fromline) and
946 (self.fromline == v.fromline) and
947 (self.header.files() == v.header.files()))
947 (self.header.files() == v.header.files()))
948
948
949 def __hash__(self):
949 def __hash__(self):
950 return hash((tuple(self.hunk),
950 return hash((tuple(self.hunk),
951 tuple(self.header.files()),
951 tuple(self.header.files()),
952 self.fromline,
952 self.fromline,
953 self.proc))
953 self.proc))
954
954
955 def countchanges(self, hunk):
955 def countchanges(self, hunk):
956 """hunk -> (n+,n-)"""
956 """hunk -> (n+,n-)"""
957 add = len([h for h in hunk if h[0] == '+'])
957 add = len([h for h in hunk if h[0] == '+'])
958 rem = len([h for h in hunk if h[0] == '-'])
958 rem = len([h for h in hunk if h[0] == '-'])
959 return add, rem
959 return add, rem
960
960
961 def write(self, fp):
961 def write(self, fp):
962 delta = len(self.before) + len(self.after)
962 delta = len(self.before) + len(self.after)
963 if self.after and self.after[-1] == '\\ No newline at end of file\n':
963 if self.after and self.after[-1] == '\\ No newline at end of file\n':
964 delta -= 1
964 delta -= 1
965 fromlen = delta + self.removed
965 fromlen = delta + self.removed
966 tolen = delta + self.added
966 tolen = delta + self.added
967 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
967 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
968 (self.fromline, fromlen, self.toline, tolen,
968 (self.fromline, fromlen, self.toline, tolen,
969 self.proc and (' ' + self.proc)))
969 self.proc and (' ' + self.proc)))
970 fp.write(''.join(self.before + self.hunk + self.after))
970 fp.write(''.join(self.before + self.hunk + self.after))
971
971
972 pretty = write
972 pretty = write
973
973
974 def filename(self):
974 def filename(self):
975 return self.header.filename()
975 return self.header.filename()
976
976
977 def __repr__(self):
977 def __repr__(self):
978 return '<hunk %r@%d>' % (self.filename(), self.fromline)
978 return '<hunk %r@%d>' % (self.filename(), self.fromline)
979
979
980 def filterpatch(ui, headers, operation=None):
980 def filterpatch(ui, headers, operation=None):
981 """Interactively filter patch chunks into applied-only chunks"""
981 """Interactively filter patch chunks into applied-only chunks"""
982 if operation is None:
982 if operation is None:
983 operation = 'record'
983 operation = 'record'
984 messages = {
984 messages = {
985 'multiple': {
985 'multiple': {
986 'discard': _("discard change %d/%d to '%s'?"),
986 'discard': _("discard change %d/%d to '%s'?"),
987 'record': _("record change %d/%d to '%s'?"),
987 'record': _("record change %d/%d to '%s'?"),
988 'revert': _("revert change %d/%d to '%s'?"),
988 'revert': _("revert change %d/%d to '%s'?"),
989 }[operation],
989 }[operation],
990 'single': {
990 'single': {
991 'discard': _("discard this change to '%s'?"),
991 'discard': _("discard this change to '%s'?"),
992 'record': _("record this change to '%s'?"),
992 'record': _("record this change to '%s'?"),
993 'revert': _("revert this change to '%s'?"),
993 'revert': _("revert this change to '%s'?"),
994 }[operation],
994 }[operation],
995 }
995 }
996
996
997 def prompt(skipfile, skipall, query, chunk):
997 def prompt(skipfile, skipall, query, chunk):
998 """prompt query, and process base inputs
998 """prompt query, and process base inputs
999
999
1000 - y/n for the rest of file
1000 - y/n for the rest of file
1001 - y/n for the rest
1001 - y/n for the rest
1002 - ? (help)
1002 - ? (help)
1003 - q (quit)
1003 - q (quit)
1004
1004
1005 Return True/False and possibly updated skipfile and skipall.
1005 Return True/False and possibly updated skipfile and skipall.
1006 """
1006 """
1007 newpatches = None
1007 newpatches = None
1008 if skipall is not None:
1008 if skipall is not None:
1009 return skipall, skipfile, skipall, newpatches
1009 return skipall, skipfile, skipall, newpatches
1010 if skipfile is not None:
1010 if skipfile is not None:
1011 return skipfile, skipfile, skipall, newpatches
1011 return skipfile, skipfile, skipall, newpatches
1012 while True:
1012 while True:
1013 resps = _('[Ynesfdaq?]'
1013 resps = _('[Ynesfdaq?]'
1014 '$$ &Yes, record this change'
1014 '$$ &Yes, record this change'
1015 '$$ &No, skip this change'
1015 '$$ &No, skip this change'
1016 '$$ &Edit this change manually'
1016 '$$ &Edit this change manually'
1017 '$$ &Skip remaining changes to this file'
1017 '$$ &Skip remaining changes to this file'
1018 '$$ Record remaining changes to this &file'
1018 '$$ Record remaining changes to this &file'
1019 '$$ &Done, skip remaining changes and files'
1019 '$$ &Done, skip remaining changes and files'
1020 '$$ Record &all changes to all remaining files'
1020 '$$ Record &all changes to all remaining files'
1021 '$$ &Quit, recording no changes'
1021 '$$ &Quit, recording no changes'
1022 '$$ &? (display help)')
1022 '$$ &? (display help)')
1023 r = ui.promptchoice("%s %s" % (query, resps))
1023 r = ui.promptchoice("%s %s" % (query, resps))
1024 ui.write("\n")
1024 ui.write("\n")
1025 if r == 8: # ?
1025 if r == 8: # ?
1026 for c, t in ui.extractchoices(resps)[1]:
1026 for c, t in ui.extractchoices(resps)[1]:
1027 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1027 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1028 continue
1028 continue
1029 elif r == 0: # yes
1029 elif r == 0: # yes
1030 ret = True
1030 ret = True
1031 elif r == 1: # no
1031 elif r == 1: # no
1032 ret = False
1032 ret = False
1033 elif r == 2: # Edit patch
1033 elif r == 2: # Edit patch
1034 if chunk is None:
1034 if chunk is None:
1035 ui.write(_('cannot edit patch for whole file'))
1035 ui.write(_('cannot edit patch for whole file'))
1036 ui.write("\n")
1036 ui.write("\n")
1037 continue
1037 continue
1038 if chunk.header.binary():
1038 if chunk.header.binary():
1039 ui.write(_('cannot edit patch for binary file'))
1039 ui.write(_('cannot edit patch for binary file'))
1040 ui.write("\n")
1040 ui.write("\n")
1041 continue
1041 continue
1042 # Patch comment based on the Git one (based on comment at end of
1042 # Patch comment based on the Git one (based on comment at end of
1043 # https://mercurial-scm.org/wiki/RecordExtension)
1043 # https://mercurial-scm.org/wiki/RecordExtension)
1044 phelp = '---' + _("""
1044 phelp = '---' + _("""
1045 To remove '-' lines, make them ' ' lines (context).
1045 To remove '-' lines, make them ' ' lines (context).
1046 To remove '+' lines, delete them.
1046 To remove '+' lines, delete them.
1047 Lines starting with # will be removed from the patch.
1047 Lines starting with # will be removed from the patch.
1048
1048
1049 If the patch applies cleanly, the edited hunk will immediately be
1049 If the patch applies cleanly, the edited hunk will immediately be
1050 added to the record list. If it does not apply cleanly, a rejects
1050 added to the record list. If it does not apply cleanly, a rejects
1051 file will be generated: you can use that when you try again. If
1051 file will be generated: you can use that when you try again. If
1052 all lines of the hunk are removed, then the edit is aborted and
1052 all lines of the hunk are removed, then the edit is aborted and
1053 the hunk is left unchanged.
1053 the hunk is left unchanged.
1054 """)
1054 """)
1055 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1055 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1056 suffix=".diff", text=True)
1056 suffix=".diff", text=True)
1057 ncpatchfp = None
1057 ncpatchfp = None
1058 try:
1058 try:
1059 # Write the initial patch
1059 # Write the initial patch
1060 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1060 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1061 chunk.header.write(f)
1061 chunk.header.write(f)
1062 chunk.write(f)
1062 chunk.write(f)
1063 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1063 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1064 f.close()
1064 f.close()
1065 # Start the editor and wait for it to complete
1065 # Start the editor and wait for it to complete
1066 editor = ui.geteditor()
1066 editor = ui.geteditor()
1067 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1067 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1068 environ={'HGUSER': ui.username()},
1068 environ={'HGUSER': ui.username()},
1069 blockedtag='filterpatch')
1069 blockedtag='filterpatch')
1070 if ret != 0:
1070 if ret != 0:
1071 ui.warn(_("editor exited with exit code %d\n") % ret)
1071 ui.warn(_("editor exited with exit code %d\n") % ret)
1072 continue
1072 continue
1073 # Remove comment lines
1073 # Remove comment lines
1074 patchfp = open(patchfn)
1074 patchfp = open(patchfn)
1075 ncpatchfp = stringio()
1075 ncpatchfp = stringio()
1076 for line in util.iterfile(patchfp):
1076 for line in util.iterfile(patchfp):
1077 if not line.startswith('#'):
1077 if not line.startswith('#'):
1078 ncpatchfp.write(line)
1078 ncpatchfp.write(line)
1079 patchfp.close()
1079 patchfp.close()
1080 ncpatchfp.seek(0)
1080 ncpatchfp.seek(0)
1081 newpatches = parsepatch(ncpatchfp)
1081 newpatches = parsepatch(ncpatchfp)
1082 finally:
1082 finally:
1083 os.unlink(patchfn)
1083 os.unlink(patchfn)
1084 del ncpatchfp
1084 del ncpatchfp
1085 # Signal that the chunk shouldn't be applied as-is, but
1085 # Signal that the chunk shouldn't be applied as-is, but
1086 # provide the new patch to be used instead.
1086 # provide the new patch to be used instead.
1087 ret = False
1087 ret = False
1088 elif r == 3: # Skip
1088 elif r == 3: # Skip
1089 ret = skipfile = False
1089 ret = skipfile = False
1090 elif r == 4: # file (Record remaining)
1090 elif r == 4: # file (Record remaining)
1091 ret = skipfile = True
1091 ret = skipfile = True
1092 elif r == 5: # done, skip remaining
1092 elif r == 5: # done, skip remaining
1093 ret = skipall = False
1093 ret = skipall = False
1094 elif r == 6: # all
1094 elif r == 6: # all
1095 ret = skipall = True
1095 ret = skipall = True
1096 elif r == 7: # quit
1096 elif r == 7: # quit
1097 raise error.Abort(_('user quit'))
1097 raise error.Abort(_('user quit'))
1098 return ret, skipfile, skipall, newpatches
1098 return ret, skipfile, skipall, newpatches
1099
1099
1100 seen = set()
1100 seen = set()
1101 applied = {} # 'filename' -> [] of chunks
1101 applied = {} # 'filename' -> [] of chunks
1102 skipfile, skipall = None, None
1102 skipfile, skipall = None, None
1103 pos, total = 1, sum(len(h.hunks) for h in headers)
1103 pos, total = 1, sum(len(h.hunks) for h in headers)
1104 for h in headers:
1104 for h in headers:
1105 pos += len(h.hunks)
1105 pos += len(h.hunks)
1106 skipfile = None
1106 skipfile = None
1107 fixoffset = 0
1107 fixoffset = 0
1108 hdr = ''.join(h.header)
1108 hdr = ''.join(h.header)
1109 if hdr in seen:
1109 if hdr in seen:
1110 continue
1110 continue
1111 seen.add(hdr)
1111 seen.add(hdr)
1112 if skipall is None:
1112 if skipall is None:
1113 h.pretty(ui)
1113 h.pretty(ui)
1114 msg = (_('examine changes to %s?') %
1114 msg = (_('examine changes to %s?') %
1115 _(' and ').join("'%s'" % f for f in h.files()))
1115 _(' and ').join("'%s'" % f for f in h.files()))
1116 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1116 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1117 if not r:
1117 if not r:
1118 continue
1118 continue
1119 applied[h.filename()] = [h]
1119 applied[h.filename()] = [h]
1120 if h.allhunks():
1120 if h.allhunks():
1121 applied[h.filename()] += h.hunks
1121 applied[h.filename()] += h.hunks
1122 continue
1122 continue
1123 for i, chunk in enumerate(h.hunks):
1123 for i, chunk in enumerate(h.hunks):
1124 if skipfile is None and skipall is None:
1124 if skipfile is None and skipall is None:
1125 chunk.pretty(ui)
1125 chunk.pretty(ui)
1126 if total == 1:
1126 if total == 1:
1127 msg = messages['single'] % chunk.filename()
1127 msg = messages['single'] % chunk.filename()
1128 else:
1128 else:
1129 idx = pos - len(h.hunks) + i
1129 idx = pos - len(h.hunks) + i
1130 msg = messages['multiple'] % (idx, total, chunk.filename())
1130 msg = messages['multiple'] % (idx, total, chunk.filename())
1131 r, skipfile, skipall, newpatches = prompt(skipfile,
1131 r, skipfile, skipall, newpatches = prompt(skipfile,
1132 skipall, msg, chunk)
1132 skipall, msg, chunk)
1133 if r:
1133 if r:
1134 if fixoffset:
1134 if fixoffset:
1135 chunk = copy.copy(chunk)
1135 chunk = copy.copy(chunk)
1136 chunk.toline += fixoffset
1136 chunk.toline += fixoffset
1137 applied[chunk.filename()].append(chunk)
1137 applied[chunk.filename()].append(chunk)
1138 elif newpatches is not None:
1138 elif newpatches is not None:
1139 for newpatch in newpatches:
1139 for newpatch in newpatches:
1140 for newhunk in newpatch.hunks:
1140 for newhunk in newpatch.hunks:
1141 if fixoffset:
1141 if fixoffset:
1142 newhunk.toline += fixoffset
1142 newhunk.toline += fixoffset
1143 applied[newhunk.filename()].append(newhunk)
1143 applied[newhunk.filename()].append(newhunk)
1144 else:
1144 else:
1145 fixoffset += chunk.removed - chunk.added
1145 fixoffset += chunk.removed - chunk.added
1146 return (sum([h for h in applied.itervalues()
1146 return (sum([h for h in applied.itervalues()
1147 if h[0].special() or len(h) > 1], []), {})
1147 if h[0].special() or len(h) > 1], []), {})
1148 class hunk(object):
1148 class hunk(object):
1149 def __init__(self, desc, num, lr, context):
1149 def __init__(self, desc, num, lr, context):
1150 self.number = num
1150 self.number = num
1151 self.desc = desc
1151 self.desc = desc
1152 self.hunk = [desc]
1152 self.hunk = [desc]
1153 self.a = []
1153 self.a = []
1154 self.b = []
1154 self.b = []
1155 self.starta = self.lena = None
1155 self.starta = self.lena = None
1156 self.startb = self.lenb = None
1156 self.startb = self.lenb = None
1157 if lr is not None:
1157 if lr is not None:
1158 if context:
1158 if context:
1159 self.read_context_hunk(lr)
1159 self.read_context_hunk(lr)
1160 else:
1160 else:
1161 self.read_unified_hunk(lr)
1161 self.read_unified_hunk(lr)
1162
1162
1163 def getnormalized(self):
1163 def getnormalized(self):
1164 """Return a copy with line endings normalized to LF."""
1164 """Return a copy with line endings normalized to LF."""
1165
1165
1166 def normalize(lines):
1166 def normalize(lines):
1167 nlines = []
1167 nlines = []
1168 for line in lines:
1168 for line in lines:
1169 if line.endswith('\r\n'):
1169 if line.endswith('\r\n'):
1170 line = line[:-2] + '\n'
1170 line = line[:-2] + '\n'
1171 nlines.append(line)
1171 nlines.append(line)
1172 return nlines
1172 return nlines
1173
1173
1174 # Dummy object, it is rebuilt manually
1174 # Dummy object, it is rebuilt manually
1175 nh = hunk(self.desc, self.number, None, None)
1175 nh = hunk(self.desc, self.number, None, None)
1176 nh.number = self.number
1176 nh.number = self.number
1177 nh.desc = self.desc
1177 nh.desc = self.desc
1178 nh.hunk = self.hunk
1178 nh.hunk = self.hunk
1179 nh.a = normalize(self.a)
1179 nh.a = normalize(self.a)
1180 nh.b = normalize(self.b)
1180 nh.b = normalize(self.b)
1181 nh.starta = self.starta
1181 nh.starta = self.starta
1182 nh.startb = self.startb
1182 nh.startb = self.startb
1183 nh.lena = self.lena
1183 nh.lena = self.lena
1184 nh.lenb = self.lenb
1184 nh.lenb = self.lenb
1185 return nh
1185 return nh
1186
1186
    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from linereader *lr*.

        self.desc must already hold the '@@ -starta,lena +startb,lenb @@'
        header line; this fills in the numeric range fields and populates
        self.a (old side), self.b (new side) and self.hunk (raw lines).
        Raises PatchError if the header does not match ``unidesc``.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the @@ header means a one-line range
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)
    def read_context_hunk(self, lr):
        """Parse a context-diff ("*** / ---") hunk from linereader *lr*.

        Reads the old-side block, then the new-side block, merging both
        into a unified-style self.hunk and rewriting self.desc as a
        '@@ -a,b +c,d @@' header.  Raises PatchError on malformed input.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        # context ranges are inclusive; a nonzero start needs the +1
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side block: '- ' and '! ' become '-', '  ' becomes ' '
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the bogus trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk while interleaving the new-side lines
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # resynchronize with self.hunk: skip past '-' lines, insert the
            # new-side line where it belongs in the merged unified hunk
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)
1317 def _fixnewline(self, lr):
1317 def _fixnewline(self, lr):
1318 l = lr.readline()
1318 l = lr.readline()
1319 if l.startswith('\ '):
1319 if l.startswith('\ '):
1320 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1320 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1321 else:
1321 else:
1322 lr.push(l)
1322 lr.push(l)
1323
1323
1324 def complete(self):
1324 def complete(self):
1325 return len(self.a) == self.lena and len(self.b) == self.lenb
1325 return len(self.a) == self.lena and len(self.b) == self.lenb
1326
1326
    def _fuzzit(self, old, new, fuzz, toponly):
        """Trim up to *fuzz* context lines from both ends of *old*/*new*.

        Only context lines (prefix ' ') are removed; self.hunk is consulted
        to count how many leading/trailing context lines exist.  Returns
        (shortened old, shortened new, number trimmed from the top).
        With *toponly* set, only the leading context is eligible.
        """
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                # count trailing context lines, scanning from the bottom up
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # never trim more than the requested fuzz on either end
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0
1354 def fuzzit(self, fuzz, toponly):
1354 def fuzzit(self, fuzz, toponly):
1355 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1355 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1356 oldstart = self.starta + top
1356 oldstart = self.starta + top
1357 newstart = self.startb + top
1357 newstart = self.startb + top
1358 # zero length hunk ranges already have their start decremented
1358 # zero length hunk ranges already have their start decremented
1359 if self.lena and oldstart > 0:
1359 if self.lena and oldstart > 0:
1360 oldstart -= 1
1360 oldstart -= 1
1361 if self.lenb and newstart > 0:
1361 if self.lenb and newstart > 0:
1362 newstart -= 1
1362 newstart -= 1
1363 return old, oldstart, new, newstart
1363 return old, oldstart, new, newstart
1364
1364
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload (full file text, or a delta to apply); None until
        # _read succeeds
        self.text = None
        # True when the payload is a 'delta' (binary diff) rather than the
        # full 'literal' content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """Return True once the binary payload has been decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the patched file content as a single-element list."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a 'GIT binary patch' body (base85 + zlib) from *lr*."""
        def getline(lr, hunk):
            # read one raw line, remember it in hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of this line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the declared size is of the *decompressed* payload; mismatch means
        # a truncated or corrupted patch
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
def parsefilename(str):
    """Extract the file name from a '--- name<TAB-or-space>stuff' line.

    The leading 4 characters ('--- ' or '+++ ') are skipped; the name
    ends at the first tab if any, otherwise at the first space, otherwise
    at the (rstripped) end of line.
    """
    # --- filename \t|space stuff
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    # imported here rather than at module level to avoid an import cycle
    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # flip each line's sign in place: '-' becomes '+' and vice versa
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            # the added/removed counters swap roles in the reversed patch
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current position in the from/to files
            self.fromline = 0
            self.toline = 0
            # function/section name from the @@ line, if any
            self.proc = ''
            self.header = None
            # trailing context of the previous hunk / leading context of the
            # next one, plus the hunk body being accumulated
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the @@ range; only the start positions are tracked
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context terminates a pending hunk: flush it to the header
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # any accumulated context becomes the leading context of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush the previous file's pending hunk before switching
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # (current state -> event) -> handler; the handlers are stored as
        # plain (unbound) functions and called explicitly with the parser
        # instance below
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            # an event not allowed in the current state means a bad patch
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        # nothing to strip; just prepend the prefix
        return '', prefix + path.rstrip()
    count = strip
    # advance i past `strip` path components, raising if the path runs out
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a non-git hunk, guessing the target file.

    Heuristically decides between afile and bfile (checking existence via
    *backend*) and whether the patch creates or deletes the file.  Raises
    PatchError when neither source nor destination can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length, zero-start old range against /dev/null means creation;
    # the symmetric case on the new side means deletion
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file',    [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk',    [hunk_lines])
    - ('range',   (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # first non-matching line goes back for the outer loop
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # header lines end at the next '---' or 'diff' line
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # context lines, including any '\ No newline' continuation
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    fp = lr.fp
    try:
        pos = fp.tell()
    except IOError:
        # unseekable stream: slurp the rest into a rewindable buffer
        fp = stringio(fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller can re-read the patch from where it started
    fp.seek(pos)
    return gitpatches
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a "file" event is pending and is emitted together with
    # the first hunk; newfile: a file header was just parsed
    emitfile = newfile = False
    # gitpatches: None until a git diff header is seen, then a reversed
    # stack of gitpatch records consumed as files are matched
    gitpatches = None

    # our states
    BFILE = 1
    # context: None until the diff flavor is known, then True for
    # context diffs, False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush git patches with no hunks (e.g. renames/copies)
            # that precede the file we just reached
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                # not a header pair; put the line back and keep scanning
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunkless git patches at end of input
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1846
1846
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def _varintlen(buf):
        # number of bytes occupied by the base-128 varint at the start
        # of buf (high bit set means "more bytes follow")
        n = 0
        for ch in buf:
            n += 1
            if not (ord(ch) & 0x80):
                break
        return n

    # skip the source-size and result-size headers; only opcodes remain
    binchunk = binchunk[_varintlen(binchunk):]
    binchunk = binchunk[_varintlen(binchunk):]

    pieces = []
    pos = 0
    end = len(binchunk)
    while pos < end:
        opcode = ord(binchunk[pos])
        pos += 1
        if opcode & 0x80:
            # copy-from-source command: flag bits select which
            # little-endian offset/size bytes are present
            offset = 0
            size = 0
            for bit, shift, issize in ((0x01, 0, False), (0x02, 8, False),
                                       (0x04, 16, False), (0x08, 24, False),
                                       (0x10, 0, True), (0x20, 8, True),
                                       (0x40, 16, True)):
                if opcode & bit:
                    val = ord(binchunk[pos]) << shift
                    pos += 1
                    if issize:
                        size |= val
                    else:
                        offset |= val
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif opcode != 0:
            # literal insert: the next 'opcode' bytes are output as-is
            pieces.append(binchunk[pos:pos + opcode])
            pos += opcode
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return "".join(pieces)
1902
1902
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic applier with the standard patchfile class
    kwargs = {'strip': strip, 'prefix': prefix, 'eolmode': eolmode}
    return _applydiff(ui, fp, patchfile, backend, store, **kwargs)
1915
1915
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Worker for applydiff: consume the iterhunks() event stream.

    'patcher' is the class used to patch a single file (normally
    patchfile).  Returns 0 for a clean patch, -1 if any hunks were
    rejected, and 1 if any hunk applied with fuzz.
    """
    if prefix:
        # normalize the user-supplied prefix against the repo root
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip leading path components and apply the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize metadata from file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: no hunks follow
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the reject but keep applying the rest
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the file store
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2005
2005
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    'files' is a mutable set updated with the names of patched files.
    Raises PatchError if the external command exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # Initialize before the loop: the external tool may emit a
    # 'with fuzz'/'FAILED' line before any 'patching file' line, which
    # previously left pf/printed_file unbound and raised NameError.
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2047
2047
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' through 'backend'; return whether fuzz occurred.

    'patchobj' may be a file name or an open file object.  'files' is a
    mutable set updated with the names of touched files.  Raises
    PatchError if the patch failed to apply.
    """
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # patchobj is already a file-like object, not a path
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2074
2074
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2081
2081
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply a patch to the given changectx through a repobackend."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2086
2086
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external patch tool takes precedence over the
    # builtin implementation
    externalcmd = ui.config('ui', 'patch')
    if externalcmd:
        return _externalpatch(ui, repo, externalcmd, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2108
2108
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at 'patchpath'.

    The patch is parsed, not applied.  For renames both the old and the
    new path are included.
    """
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    # non-git patch: synthesize metadata from file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2129
2129
class GitDiffRequired(Exception):
    """Signals that a change can only be expressed in the git extended
    diff format."""
    pass
2132
2132
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    # opt in to every feature group difffeatureopts() knows about
    allfeatures = dict(git=True, whitespace=True, formatchanging=True)
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, **allfeatures)

# historical alias kept for callers that still use the old name
diffopts = diffallopts
2139
2139
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line values win over hgrc, but only when they look
        # explicitly set (see comment below)
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a value, even after warning on bad input
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2217
2217
2218 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2218 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2219 losedatafn=None, prefix='', relroot='', copy=None):
2219 losedatafn=None, prefix='', relroot='', copy=None):
2220 '''yields diff of changes to files between two nodes, or node and
2220 '''yields diff of changes to files between two nodes, or node and
2221 working directory.
2221 working directory.
2222
2222
2223 if node1 is None, use first dirstate parent instead.
2223 if node1 is None, use first dirstate parent instead.
2224 if node2 is None, compare node1 with working directory.
2224 if node2 is None, compare node1 with working directory.
2225
2225
2226 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2226 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2227 every time some change cannot be represented with the current
2227 every time some change cannot be represented with the current
2228 patch format. Return False to upgrade to git patch format, True to
2228 patch format. Return False to upgrade to git patch format, True to
2229 accept the loss or raise an exception to abort the diff. It is
2229 accept the loss or raise an exception to abort the diff. It is
2230 called with the name of current file being diffed as 'fn'. If set
2230 called with the name of current file being diffed as 'fn'. If set
2231 to None, patches will always be upgraded to git format when
2231 to None, patches will always be upgraded to git format when
2232 necessary.
2232 necessary.
2233
2233
2234 prefix is a filename prefix that is prepended to all filenames on
2234 prefix is a filename prefix that is prepended to all filenames on
2235 display (used for subrepos).
2235 display (used for subrepos).
2236
2236
2237 relroot, if not empty, must be normalized with a trailing /. Any match
2237 relroot, if not empty, must be normalized with a trailing /. Any match
2238 patterns that fall outside it will be ignored.
2238 patterns that fall outside it will be ignored.
2239
2239
2240 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2240 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2241 information.'''
2241 information.'''
2242
2242
2243 if opts is None:
2243 if opts is None:
2244 opts = mdiff.defaultopts
2244 opts = mdiff.defaultopts
2245
2245
2246 if not node1 and not node2:
2246 if not node1 and not node2:
2247 node1 = repo.dirstate.p1()
2247 node1 = repo.dirstate.p1()
2248
2248
2249 def lrugetfilectx():
2249 def lrugetfilectx():
2250 cache = {}
2250 cache = {}
2251 order = collections.deque()
2251 order = collections.deque()
2252 def getfilectx(f, ctx):
2252 def getfilectx(f, ctx):
2253 fctx = ctx.filectx(f, filelog=cache.get(f))
2253 fctx = ctx.filectx(f, filelog=cache.get(f))
2254 if f not in cache:
2254 if f not in cache:
2255 if len(cache) > 20:
2255 if len(cache) > 20:
2256 del cache[order.popleft()]
2256 del cache[order.popleft()]
2257 cache[f] = fctx.filelog()
2257 cache[f] = fctx.filelog()
2258 else:
2258 else:
2259 order.remove(f)
2259 order.remove(f)
2260 order.append(f)
2260 order.append(f)
2261 return fctx
2261 return fctx
2262 return getfilectx
2262 return getfilectx
2263 getfilectx = lrugetfilectx()
2263 getfilectx = lrugetfilectx()
2264
2264
2265 ctx1 = repo[node1]
2265 ctx1 = repo[node1]
2266 ctx2 = repo[node2]
2266 ctx2 = repo[node2]
2267
2267
2268 relfiltered = False
2268 relfiltered = False
2269 if relroot != '' and match.always():
2269 if relroot != '' and match.always():
2270 # as a special case, create a new matcher with just the relroot
2270 # as a special case, create a new matcher with just the relroot
2271 pats = [relroot]
2271 pats = [relroot]
2272 match = scmutil.match(ctx2, pats, default='path')
2272 match = scmutil.match(ctx2, pats, default='path')
2273 relfiltered = True
2273 relfiltered = True
2274
2274
2275 if not changes:
2275 if not changes:
2276 changes = repo.status(ctx1, ctx2, match=match)
2276 changes = repo.status(ctx1, ctx2, match=match)
2277 modified, added, removed = changes[:3]
2277 modified, added, removed = changes[:3]
2278
2278
2279 if not modified and not added and not removed:
2279 if not modified and not added and not removed:
2280 return []
2280 return []
2281
2281
2282 if repo.ui.debugflag:
2282 if repo.ui.debugflag:
2283 hexfunc = hex
2283 hexfunc = hex
2284 else:
2284 else:
2285 hexfunc = short
2285 hexfunc = short
2286 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2286 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2287
2287
2288 if copy is None:
2288 if copy is None:
2289 copy = {}
2289 copy = {}
2290 if opts.git or opts.upgrade:
2290 if opts.git or opts.upgrade:
2291 copy = copies.pathcopies(ctx1, ctx2, match=match)
2291 copy = copies.pathcopies(ctx1, ctx2, match=match)
2292
2292
2293 if relroot is not None:
2293 if relroot is not None:
2294 if not relfiltered:
2294 if not relfiltered:
2295 # XXX this would ideally be done in the matcher, but that is
2295 # XXX this would ideally be done in the matcher, but that is
2296 # generally meant to 'or' patterns, not 'and' them. In this case we
2296 # generally meant to 'or' patterns, not 'and' them. In this case we
2297 # need to 'and' all the patterns from the matcher with relroot.
2297 # need to 'and' all the patterns from the matcher with relroot.
2298 def filterrel(l):
2298 def filterrel(l):
2299 return [f for f in l if f.startswith(relroot)]
2299 return [f for f in l if f.startswith(relroot)]
2300 modified = filterrel(modified)
2300 modified = filterrel(modified)
2301 added = filterrel(added)
2301 added = filterrel(added)
2302 removed = filterrel(removed)
2302 removed = filterrel(removed)
2303 relfiltered = True
2303 relfiltered = True
2304 # filter out copies where either side isn't inside the relative root
2304 # filter out copies where either side isn't inside the relative root
2305 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2305 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2306 if dst.startswith(relroot)
2306 if dst.startswith(relroot)
2307 and src.startswith(relroot)))
2307 and src.startswith(relroot)))
2308
2308
2309 modifiedset = set(modified)
2309 modifiedset = set(modified)
2310 addedset = set(added)
2310 addedset = set(added)
2311 removedset = set(removed)
2311 removedset = set(removed)
2312 for f in modified:
2312 for f in modified:
2313 if f not in ctx1:
2313 if f not in ctx1:
2314 # Fix up added, since merged-in additions appear as
2314 # Fix up added, since merged-in additions appear as
2315 # modifications during merges
2315 # modifications during merges
2316 modifiedset.remove(f)
2316 modifiedset.remove(f)
2317 addedset.add(f)
2317 addedset.add(f)
2318 for f in removed:
2318 for f in removed:
2319 if f not in ctx1:
2319 if f not in ctx1:
2320 # Merged-in additions that are then removed are reported as removed.
2320 # Merged-in additions that are then removed are reported as removed.
2321 # They are not in ctx1, so We don't want to show them in the diff.
2321 # They are not in ctx1, so We don't want to show them in the diff.
2322 removedset.remove(f)
2322 removedset.remove(f)
2323 modified = sorted(modifiedset)
2323 modified = sorted(modifiedset)
2324 added = sorted(addedset)
2324 added = sorted(addedset)
2325 removed = sorted(removedset)
2325 removed = sorted(removedset)
2326 for dst, src in copy.items():
2326 for dst, src in copy.items():
2327 if src not in ctx1:
2327 if src not in ctx1:
2328 # Files merged in during a merge and then copied/renamed are
2328 # Files merged in during a merge and then copied/renamed are
2329 # reported as copies. We want to show them in the diff as additions.
2329 # reported as copies. We want to show them in the diff as additions.
2330 del copy[dst]
2330 del copy[dst]
2331
2331
2332 def difffn(opts, losedata):
2332 def difffn(opts, losedata):
2333 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2333 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2334 copy, getfilectx, opts, losedata, prefix, relroot)
2334 copy, getfilectx, opts, losedata, prefix, relroot)
2335 if opts.upgrade and not opts.git:
2335 if opts.upgrade and not opts.git:
2336 try:
2336 try:
2337 def losedata(fn):
2337 def losedata(fn):
2338 if not losedatafn or not losedatafn(fn=fn):
2338 if not losedatafn or not losedatafn(fn=fn):
2339 raise GitDiffRequired
2339 raise GitDiffRequired
2340 # Buffer the whole output until we are sure it can be generated
2340 # Buffer the whole output until we are sure it can be generated
2341 return list(difffn(opts.copy(git=False), losedata))
2341 return list(difffn(opts.copy(git=False), losedata))
2342 except GitDiffRequired:
2342 except GitDiffRequired:
2343 return difffn(opts.copy(git=True), None)
2343 return difffn(opts.copy(git=True), None)
2344 else:
2344 else:
2345 return difffn(opts, None)
2345 return difffn(opts, None)
2346
2346
2347 def difflabel(func, *args, **kw):
2347 def difflabel(func, *args, **kw):
2348 '''yields 2-tuples of (output, label) based on the output of func()'''
2348 '''yields 2-tuples of (output, label) based on the output of func()'''
2349 headprefixes = [('diff', 'diff.diffline'),
2349 headprefixes = [('diff', 'diff.diffline'),
2350 ('copy', 'diff.extended'),
2350 ('copy', 'diff.extended'),
2351 ('rename', 'diff.extended'),
2351 ('rename', 'diff.extended'),
2352 ('old', 'diff.extended'),
2352 ('old', 'diff.extended'),
2353 ('new', 'diff.extended'),
2353 ('new', 'diff.extended'),
2354 ('deleted', 'diff.extended'),
2354 ('deleted', 'diff.extended'),
2355 ('index', 'diff.extended'),
2355 ('index', 'diff.extended'),
2356 ('similarity', 'diff.extended'),
2356 ('similarity', 'diff.extended'),
2357 ('---', 'diff.file_a'),
2357 ('---', 'diff.file_a'),
2358 ('+++', 'diff.file_b')]
2358 ('+++', 'diff.file_b')]
2359 textprefixes = [('@', 'diff.hunk'),
2359 textprefixes = [('@', 'diff.hunk'),
2360 ('-', 'diff.deleted'),
2360 ('-', 'diff.deleted'),
2361 ('+', 'diff.inserted')]
2361 ('+', 'diff.inserted')]
2362 head = False
2362 head = False
2363 for chunk in func(*args, **kw):
2363 for chunk in func(*args, **kw):
2364 lines = chunk.split('\n')
2364 lines = chunk.split('\n')
2365 for i, line in enumerate(lines):
2365 for i, line in enumerate(lines):
2366 if i != 0:
2366 if i != 0:
2367 yield ('\n', '')
2367 yield ('\n', '')
2368 if head:
2368 if head:
2369 if line.startswith('@'):
2369 if line.startswith('@'):
2370 head = False
2370 head = False
2371 else:
2371 else:
2372 if line and line[0] not in ' +-@\\':
2372 if line and line[0] not in ' +-@\\':
2373 head = True
2373 head = True
2374 stripline = line
2374 stripline = line
2375 diffline = False
2375 diffline = False
2376 if not head and line and line[0] in '+-':
2376 if not head and line and line[0] in '+-':
2377 # highlight tabs and trailing whitespace, but only in
2377 # highlight tabs and trailing whitespace, but only in
2378 # changed lines
2378 # changed lines
2379 stripline = line.rstrip()
2379 stripline = line.rstrip()
2380 diffline = True
2380 diffline = True
2381
2381
2382 prefixes = textprefixes
2382 prefixes = textprefixes
2383 if head:
2383 if head:
2384 prefixes = headprefixes
2384 prefixes = headprefixes
2385 for prefix, label in prefixes:
2385 for prefix, label in prefixes:
2386 if stripline.startswith(prefix):
2386 if stripline.startswith(prefix):
2387 if diffline:
2387 if diffline:
2388 for token in tabsplitter.findall(stripline):
2388 for token in tabsplitter.findall(stripline):
2389 if '\t' == token[0]:
2389 if '\t' == token[0]:
2390 yield (token, 'diff.tab')
2390 yield (token, 'diff.tab')
2391 else:
2391 else:
2392 yield (token, label)
2392 yield (token, label)
2393 else:
2393 else:
2394 yield (stripline, label)
2394 yield (stripline, label)
2395 break
2395 break
2396 else:
2396 else:
2397 yield (line, '')
2397 yield (line, '')
2398 if line != stripline:
2398 if line != stripline:
2399 yield (line[len(stripline):], 'diff.trailingwhitespace')
2399 yield (line[len(stripline):], 'diff.trailingwhitespace')
2400
2400
2401 def diffui(*args, **kw):
2401 def diffui(*args, **kw):
2402 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2402 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2403 return difflabel(diff, *args, **kw)
2403 return difflabel(diff, *args, **kw)
2404
2404
2405 def _filepairs(modified, added, removed, copy, opts):
2405 def _filepairs(modified, added, removed, copy, opts):
2406 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2406 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2407 before and f2 is the the name after. For added files, f1 will be None,
2407 before and f2 is the the name after. For added files, f1 will be None,
2408 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2408 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2409 or 'rename' (the latter two only if opts.git is set).'''
2409 or 'rename' (the latter two only if opts.git is set).'''
2410 gone = set()
2410 gone = set()
2411
2411
2412 copyto = dict([(v, k) for k, v in copy.items()])
2412 copyto = dict([(v, k) for k, v in copy.items()])
2413
2413
2414 addedset, removedset = set(added), set(removed)
2414 addedset, removedset = set(added), set(removed)
2415
2415
2416 for f in sorted(modified + added + removed):
2416 for f in sorted(modified + added + removed):
2417 copyop = None
2417 copyop = None
2418 f1, f2 = f, f
2418 f1, f2 = f, f
2419 if f in addedset:
2419 if f in addedset:
2420 f1 = None
2420 f1 = None
2421 if f in copy:
2421 if f in copy:
2422 if opts.git:
2422 if opts.git:
2423 f1 = copy[f]
2423 f1 = copy[f]
2424 if f1 in removedset and f1 not in gone:
2424 if f1 in removedset and f1 not in gone:
2425 copyop = 'rename'
2425 copyop = 'rename'
2426 gone.add(f1)
2426 gone.add(f1)
2427 else:
2427 else:
2428 copyop = 'copy'
2428 copyop = 'copy'
2429 elif f in removedset:
2429 elif f in removedset:
2430 f2 = None
2430 f2 = None
2431 if opts.git:
2431 if opts.git:
2432 # have we already reported a copy above?
2432 # have we already reported a copy above?
2433 if (f in copyto and copyto[f] in addedset
2433 if (f in copyto and copyto[f] in addedset
2434 and copy[copyto[f]] == f):
2434 and copy[copyto[f]] == f):
2435 continue
2435 continue
2436 yield f1, f2, copyop
2436 yield f1, f2, copyop
2437
2437
2438 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2438 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2439 copy, getfilectx, opts, losedatafn, prefix, relroot):
2439 copy, getfilectx, opts, losedatafn, prefix, relroot):
2440 '''given input data, generate a diff and yield it in blocks
2440 '''given input data, generate a diff and yield it in blocks
2441
2441
2442 If generating a diff would lose data like flags or binary data and
2442 If generating a diff would lose data like flags or binary data and
2443 losedatafn is not None, it will be called.
2443 losedatafn is not None, it will be called.
2444
2444
2445 relroot is removed and prefix is added to every path in the diff output.
2445 relroot is removed and prefix is added to every path in the diff output.
2446
2446
2447 If relroot is not empty, this function expects every path in modified,
2447 If relroot is not empty, this function expects every path in modified,
2448 added, removed and copy to start with it.'''
2448 added, removed and copy to start with it.'''
2449
2449
2450 def gitindex(text):
2450 def gitindex(text):
2451 if not text:
2451 if not text:
2452 text = ""
2452 text = ""
2453 l = len(text)
2453 l = len(text)
2454 s = hashlib.sha1('blob %d\0' % l)
2454 s = hashlib.sha1('blob %d\0' % l)
2455 s.update(text)
2455 s.update(text)
2456 return s.hexdigest()
2456 return s.hexdigest()
2457
2457
2458 if opts.noprefix:
2458 if opts.noprefix:
2459 aprefix = bprefix = ''
2459 aprefix = bprefix = ''
2460 else:
2460 else:
2461 aprefix = 'a/'
2461 aprefix = 'a/'
2462 bprefix = 'b/'
2462 bprefix = 'b/'
2463
2463
2464 def diffline(f, revs):
2464 def diffline(f, revs):
2465 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2465 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2466 return 'diff %s %s' % (revinfo, f)
2466 return 'diff %s %s' % (revinfo, f)
2467
2467
2468 date1 = util.datestr(ctx1.date())
2468 date1 = util.datestr(ctx1.date())
2469 date2 = util.datestr(ctx2.date())
2469 date2 = util.datestr(ctx2.date())
2470
2470
2471 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2471 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2472
2472
2473 if relroot != '' and (repo.ui.configbool('devel', 'all')
2473 if relroot != '' and (repo.ui.configbool('devel', 'all')
2474 or repo.ui.configbool('devel', 'check-relroot')):
2474 or repo.ui.configbool('devel', 'check-relroot')):
2475 for f in modified + added + removed + copy.keys() + copy.values():
2475 for f in modified + added + removed + copy.keys() + copy.values():
2476 if f is not None and not f.startswith(relroot):
2476 if f is not None and not f.startswith(relroot):
2477 raise AssertionError(
2477 raise AssertionError(
2478 "file %s doesn't start with relroot %s" % (f, relroot))
2478 "file %s doesn't start with relroot %s" % (f, relroot))
2479
2479
2480 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2480 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2481 content1 = None
2481 content1 = None
2482 content2 = None
2482 content2 = None
2483 flag1 = None
2483 flag1 = None
2484 flag2 = None
2484 flag2 = None
2485 if f1:
2485 if f1:
2486 content1 = getfilectx(f1, ctx1).data()
2486 content1 = getfilectx(f1, ctx1).data()
2487 if opts.git or losedatafn:
2487 if opts.git or losedatafn:
2488 flag1 = ctx1.flags(f1)
2488 flag1 = ctx1.flags(f1)
2489 if f2:
2489 if f2:
2490 content2 = getfilectx(f2, ctx2).data()
2490 content2 = getfilectx(f2, ctx2).data()
2491 if opts.git or losedatafn:
2491 if opts.git or losedatafn:
2492 flag2 = ctx2.flags(f2)
2492 flag2 = ctx2.flags(f2)
2493 binary = False
2493 binary = False
2494 if opts.git or losedatafn:
2494 if opts.git or losedatafn:
2495 binary = util.binary(content1) or util.binary(content2)
2495 binary = util.binary(content1) or util.binary(content2)
2496
2496
2497 if losedatafn and not opts.git:
2497 if losedatafn and not opts.git:
2498 if (binary or
2498 if (binary or
2499 # copy/rename
2499 # copy/rename
2500 f2 in copy or
2500 f2 in copy or
2501 # empty file creation
2501 # empty file creation
2502 (not f1 and not content2) or
2502 (not f1 and not content2) or
2503 # empty file deletion
2503 # empty file deletion
2504 (not content1 and not f2) or
2504 (not content1 and not f2) or
2505 # create with flags
2505 # create with flags
2506 (not f1 and flag2) or
2506 (not f1 and flag2) or
2507 # change flags
2507 # change flags
2508 (f1 and f2 and flag1 != flag2)):
2508 (f1 and f2 and flag1 != flag2)):
2509 losedatafn(f2 or f1)
2509 losedatafn(f2 or f1)
2510
2510
2511 path1 = f1 or f2
2511 path1 = f1 or f2
2512 path2 = f2 or f1
2512 path2 = f2 or f1
2513 path1 = posixpath.join(prefix, path1[len(relroot):])
2513 path1 = posixpath.join(prefix, path1[len(relroot):])
2514 path2 = posixpath.join(prefix, path2[len(relroot):])
2514 path2 = posixpath.join(prefix, path2[len(relroot):])
2515 header = []
2515 header = []
2516 if opts.git:
2516 if opts.git:
2517 header.append('diff --git %s%s %s%s' %
2517 header.append('diff --git %s%s %s%s' %
2518 (aprefix, path1, bprefix, path2))
2518 (aprefix, path1, bprefix, path2))
2519 if not f1: # added
2519 if not f1: # added
2520 header.append('new file mode %s' % gitmode[flag2])
2520 header.append('new file mode %s' % gitmode[flag2])
2521 elif not f2: # removed
2521 elif not f2: # removed
2522 header.append('deleted file mode %s' % gitmode[flag1])
2522 header.append('deleted file mode %s' % gitmode[flag1])
2523 else: # modified/copied/renamed
2523 else: # modified/copied/renamed
2524 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2524 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2525 if mode1 != mode2:
2525 if mode1 != mode2:
2526 header.append('old mode %s' % mode1)
2526 header.append('old mode %s' % mode1)
2527 header.append('new mode %s' % mode2)
2527 header.append('new mode %s' % mode2)
2528 if copyop is not None:
2528 if copyop is not None:
2529 if opts.showsimilarity:
2529 if opts.showsimilarity:
2530 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2530 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2531 header.append('similarity index %d%%' % sim)
2531 header.append('similarity index %d%%' % sim)
2532 header.append('%s from %s' % (copyop, path1))
2532 header.append('%s from %s' % (copyop, path1))
2533 header.append('%s to %s' % (copyop, path2))
2533 header.append('%s to %s' % (copyop, path2))
2534 elif revs and not repo.ui.quiet:
2534 elif revs and not repo.ui.quiet:
2535 header.append(diffline(path1, revs))
2535 header.append(diffline(path1, revs))
2536
2536
2537 if binary and opts.git and not opts.nobinary:
2537 if binary and opts.git and not opts.nobinary:
2538 text = mdiff.b85diff(content1, content2)
2538 text = mdiff.b85diff(content1, content2)
2539 if text:
2539 if text:
2540 header.append('index %s..%s' %
2540 header.append('index %s..%s' %
2541 (gitindex(content1), gitindex(content2)))
2541 (gitindex(content1), gitindex(content2)))
2542 else:
2542 else:
2543 if opts.git and opts.index > 0:
2543 if opts.git and opts.index > 0:
2544 flag = flag1
2544 flag = flag1
2545 if flag is None:
2545 if flag is None:
2546 flag = flag2
2546 flag = flag2
2547 header.append('index %s..%s %s' %
2547 header.append('index %s..%s %s' %
2548 (gitindex(content1)[0:opts.index],
2548 (gitindex(content1)[0:opts.index],
2549 gitindex(content2)[0:opts.index],
2549 gitindex(content2)[0:opts.index],
2550 gitmode[flag]))
2550 gitmode[flag]))
2551
2551
2552 uheaders, text = mdiff.unidiff(content1, date1,
2552 uheaders, hunks = mdiff.unidiff(content1, date1,
2553 content2, date2,
2553 content2, date2,
2554 path1, path2, opts=opts)
2554 path1, path2, opts=opts)
2555 header.extend(uheaders)
2555 header.extend(uheaders)
2556 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2556 if header and (text or len(header) > 1):
2557 if header and (text or len(header) > 1):
2557 yield '\n'.join(header) + '\n'
2558 yield '\n'.join(header) + '\n'
2558 if text:
2559 if text:
2559 yield text
2560 yield text
2560
2561
2561 def diffstatsum(stats):
2562 def diffstatsum(stats):
2562 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2563 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2563 for f, a, r, b in stats:
2564 for f, a, r, b in stats:
2564 maxfile = max(maxfile, encoding.colwidth(f))
2565 maxfile = max(maxfile, encoding.colwidth(f))
2565 maxtotal = max(maxtotal, a + r)
2566 maxtotal = max(maxtotal, a + r)
2566 addtotal += a
2567 addtotal += a
2567 removetotal += r
2568 removetotal += r
2568 binary = binary or b
2569 binary = binary or b
2569
2570
2570 return maxfile, maxtotal, addtotal, removetotal, binary
2571 return maxfile, maxtotal, addtotal, removetotal, binary
2571
2572
2572 def diffstatdata(lines):
2573 def diffstatdata(lines):
2573 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2574 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2574
2575
2575 results = []
2576 results = []
2576 filename, adds, removes, isbinary = None, 0, 0, False
2577 filename, adds, removes, isbinary = None, 0, 0, False
2577
2578
2578 def addresult():
2579 def addresult():
2579 if filename:
2580 if filename:
2580 results.append((filename, adds, removes, isbinary))
2581 results.append((filename, adds, removes, isbinary))
2581
2582
2582 for line in lines:
2583 for line in lines:
2583 if line.startswith('diff'):
2584 if line.startswith('diff'):
2584 addresult()
2585 addresult()
2585 # set numbers to 0 anyway when starting new file
2586 # set numbers to 0 anyway when starting new file
2586 adds, removes, isbinary = 0, 0, False
2587 adds, removes, isbinary = 0, 0, False
2587 if line.startswith('diff --git a/'):
2588 if line.startswith('diff --git a/'):
2588 filename = gitre.search(line).group(2)
2589 filename = gitre.search(line).group(2)
2589 elif line.startswith('diff -r'):
2590 elif line.startswith('diff -r'):
2590 # format: "diff -r ... -r ... filename"
2591 # format: "diff -r ... -r ... filename"
2591 filename = diffre.search(line).group(1)
2592 filename = diffre.search(line).group(1)
2592 elif line.startswith('+') and not line.startswith('+++ '):
2593 elif line.startswith('+') and not line.startswith('+++ '):
2593 adds += 1
2594 adds += 1
2594 elif line.startswith('-') and not line.startswith('--- '):
2595 elif line.startswith('-') and not line.startswith('--- '):
2595 removes += 1
2596 removes += 1
2596 elif (line.startswith('GIT binary patch') or
2597 elif (line.startswith('GIT binary patch') or
2597 line.startswith('Binary file')):
2598 line.startswith('Binary file')):
2598 isbinary = True
2599 isbinary = True
2599 addresult()
2600 addresult()
2600 return results
2601 return results
2601
2602
2602 def diffstat(lines, width=80):
2603 def diffstat(lines, width=80):
2603 output = []
2604 output = []
2604 stats = diffstatdata(lines)
2605 stats = diffstatdata(lines)
2605 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2606 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2606
2607
2607 countwidth = len(str(maxtotal))
2608 countwidth = len(str(maxtotal))
2608 if hasbinary and countwidth < 3:
2609 if hasbinary and countwidth < 3:
2609 countwidth = 3
2610 countwidth = 3
2610 graphwidth = width - countwidth - maxname - 6
2611 graphwidth = width - countwidth - maxname - 6
2611 if graphwidth < 10:
2612 if graphwidth < 10:
2612 graphwidth = 10
2613 graphwidth = 10
2613
2614
2614 def scale(i):
2615 def scale(i):
2615 if maxtotal <= graphwidth:
2616 if maxtotal <= graphwidth:
2616 return i
2617 return i
2617 # If diffstat runs out of room it doesn't print anything,
2618 # If diffstat runs out of room it doesn't print anything,
2618 # which isn't very useful, so always print at least one + or -
2619 # which isn't very useful, so always print at least one + or -
2619 # if there were at least some changes.
2620 # if there were at least some changes.
2620 return max(i * graphwidth // maxtotal, int(bool(i)))
2621 return max(i * graphwidth // maxtotal, int(bool(i)))
2621
2622
2622 for filename, adds, removes, isbinary in stats:
2623 for filename, adds, removes, isbinary in stats:
2623 if isbinary:
2624 if isbinary:
2624 count = 'Bin'
2625 count = 'Bin'
2625 else:
2626 else:
2626 count = adds + removes
2627 count = adds + removes
2627 pluses = '+' * scale(adds)
2628 pluses = '+' * scale(adds)
2628 minuses = '-' * scale(removes)
2629 minuses = '-' * scale(removes)
2629 output.append(' %s%s | %*s %s%s\n' %
2630 output.append(' %s%s | %*s %s%s\n' %
2630 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2631 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2631 countwidth, count, pluses, minuses))
2632 countwidth, count, pluses, minuses))
2632
2633
2633 if stats:
2634 if stats:
2634 output.append(_(' %d files changed, %d insertions(+), '
2635 output.append(_(' %d files changed, %d insertions(+), '
2635 '%d deletions(-)\n')
2636 '%d deletions(-)\n')
2636 % (len(stats), totaladds, totalremoves))
2637 % (len(stats), totaladds, totalremoves))
2637
2638
2638 return ''.join(output)
2639 return ''.join(output)
2639
2640
2640 def diffstatui(*args, **kw):
2641 def diffstatui(*args, **kw):
2641 '''like diffstat(), but yields 2-tuples of (output, label) for
2642 '''like diffstat(), but yields 2-tuples of (output, label) for
2642 ui.write()
2643 ui.write()
2643 '''
2644 '''
2644
2645
2645 for line in diffstat(*args, **kw).splitlines():
2646 for line in diffstat(*args, **kw).splitlines():
2646 if line and line[-1] in '+-':
2647 if line and line[-1] in '+-':
2647 name, graph = line.rsplit(' ', 1)
2648 name, graph = line.rsplit(' ', 1)
2648 yield (name + ' ', '')
2649 yield (name + ' ', '')
2649 m = re.search(r'\++', graph)
2650 m = re.search(r'\++', graph)
2650 if m:
2651 if m:
2651 yield (m.group(0), 'diffstat.inserted')
2652 yield (m.group(0), 'diffstat.inserted')
2652 m = re.search(r'-+', graph)
2653 m = re.search(r'-+', graph)
2653 if m:
2654 if m:
2654 yield (m.group(0), 'diffstat.deleted')
2655 yield (m.group(0), 'diffstat.deleted')
2655 else:
2656 else:
2656 yield (line, '')
2657 yield (line, '')
2657 yield ('\n', '')
2658 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now