diff: unify calls to diffline...
Guillermo Pérez
r17940:c84ef004 default
@@ -1,381 +1,378
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import bdiff, mpatch, util
9 import bdiff, mpatch, util
10 import re, struct, base85, zlib
10 import re, struct, base85, zlib
11 from node import hex, nullid
11 from node import hex, nullid
12
12
13 def splitnewlines(text):
13 def splitnewlines(text):
14 '''like str.splitlines, but only split on newlines.'''
14 '''like str.splitlines, but only split on newlines.'''
15 lines = [l + '\n' for l in text.split('\n')]
15 lines = [l + '\n' for l in text.split('\n')]
16 if lines:
16 if lines:
17 if lines[-1] == '\n':
17 if lines[-1] == '\n':
18 lines.pop()
18 lines.pop()
19 else:
19 else:
20 lines[-1] = lines[-1][:-1]
20 lines[-1] = lines[-1][:-1]
21 return lines
21 return lines
22
22
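A quick illustration of splitnewlines() (not part of this changeset): only '\n' splits, the terminators are kept, and joining the pieces reproduces the input exactly.

# Illustration only, using the splitnewlines() defined above.
assert splitnewlines("a\nb\r\nc") == ['a\n', 'b\r\n', 'c']
assert splitnewlines("a\n") == ['a\n']
assert ''.join(splitnewlines("a\nb\r\nc")) == "a\nb\r\nc"   # lossless round trip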
23 class diffopts(object):
23 class diffopts(object):
24 '''context is the number of context lines
24 '''context is the number of context lines
25 text treats all files as text
25 text treats all files as text
26 showfunc enables diff -p output
26 showfunc enables diff -p output
27 git enables the git extended patch format
27 git enables the git extended patch format
28 nodates removes dates from diff headers
28 nodates removes dates from diff headers
29 ignorews ignores all whitespace changes in the diff
29 ignorews ignores all whitespace changes in the diff
30 ignorewsamount ignores changes in the amount of whitespace
30 ignorewsamount ignores changes in the amount of whitespace
31 ignoreblanklines ignores changes whose lines are all blank
31 ignoreblanklines ignores changes whose lines are all blank
32 upgrade generates git diffs to avoid data loss
32 upgrade generates git diffs to avoid data loss
33 '''
33 '''
34
34
35 defaults = {
35 defaults = {
36 'context': 3,
36 'context': 3,
37 'text': False,
37 'text': False,
38 'showfunc': False,
38 'showfunc': False,
39 'git': False,
39 'git': False,
40 'nodates': False,
40 'nodates': False,
41 'ignorews': False,
41 'ignorews': False,
42 'ignorewsamount': False,
42 'ignorewsamount': False,
43 'ignoreblanklines': False,
43 'ignoreblanklines': False,
44 'upgrade': False,
44 'upgrade': False,
45 }
45 }
46
46
47 __slots__ = defaults.keys()
47 __slots__ = defaults.keys()
48
48
49 def __init__(self, **opts):
49 def __init__(self, **opts):
50 for k in self.__slots__:
50 for k in self.__slots__:
51 v = opts.get(k)
51 v = opts.get(k)
52 if v is None:
52 if v is None:
53 v = self.defaults[k]
53 v = self.defaults[k]
54 setattr(self, k, v)
54 setattr(self, k, v)
55
55
56 try:
56 try:
57 self.context = int(self.context)
57 self.context = int(self.context)
58 except ValueError:
58 except ValueError:
59 raise util.Abort(_('diff context lines count must be '
59 raise util.Abort(_('diff context lines count must be '
60 'an integer, not %r') % self.context)
60 'an integer, not %r') % self.context)
61
61
62 def copy(self, **kwargs):
62 def copy(self, **kwargs):
63 opts = dict((k, getattr(self, k)) for k in self.defaults)
63 opts = dict((k, getattr(self, k)) for k in self.defaults)
64 opts.update(kwargs)
64 opts.update(kwargs)
65 return diffopts(**opts)
65 return diffopts(**opts)
66
66
67 defaultopts = diffopts()
67 defaultopts = diffopts()
68
68
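A usage sketch for diffopts (illustrative; the values are arbitrary): only the keys listed in defaults are settable, context is coerced to an int, and copy() derives a tweaked instance.

# Sketch using the diffopts class above.
opts = diffopts(git=True, context=5)
assert opts.git and opts.context == 5 and opts.showfunc is False
wide = opts.copy(context=8)           # new object; other fields carried over
assert wide.git and wide.context == 8 and opts.context == 5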
69 def wsclean(opts, text, blank=True):
69 def wsclean(opts, text, blank=True):
70 if opts.ignorews:
70 if opts.ignorews:
71 text = bdiff.fixws(text, 1)
71 text = bdiff.fixws(text, 1)
72 elif opts.ignorewsamount:
72 elif opts.ignorewsamount:
73 text = bdiff.fixws(text, 0)
73 text = bdiff.fixws(text, 0)
74 if blank and opts.ignoreblanklines:
74 if blank and opts.ignoreblanklines:
75 text = re.sub('\n+', '\n', text).strip('\n')
75 text = re.sub('\n+', '\n', text).strip('\n')
76 return text
76 return text
77
77
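A behaviour sketch for wsclean (illustrative; the exact normalization is done by the C helper bdiff.fixws, so treat the commented results as expected values rather than a specification):

print wsclean(diffopts(ignorews=True), "a \tb\n")             # -> "ab\n"
print wsclean(diffopts(ignorewsamount=True), "a  \t b\n")     # -> "a b\n"
print wsclean(diffopts(ignoreblanklines=True), "a\n\n\nb\n")  # -> "a\nb"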
78 def splitblock(base1, lines1, base2, lines2, opts):
78 def splitblock(base1, lines1, base2, lines2, opts):
79 # The input lines match except for interwoven blank lines. We
79 # The input lines match except for interwoven blank lines. We
80 # transform it into a sequence of matching blocks and blank blocks.
80 # transform it into a sequence of matching blocks and blank blocks.
81 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
81 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
82 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
82 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
83 s1, e1 = 0, len(lines1)
83 s1, e1 = 0, len(lines1)
84 s2, e2 = 0, len(lines2)
84 s2, e2 = 0, len(lines2)
85 while s1 < e1 or s2 < e2:
85 while s1 < e1 or s2 < e2:
86 i1, i2, btype = s1, s2, '='
86 i1, i2, btype = s1, s2, '='
87 if (i1 >= e1 or lines1[i1] == 0
87 if (i1 >= e1 or lines1[i1] == 0
88 or i2 >= e2 or lines2[i2] == 0):
88 or i2 >= e2 or lines2[i2] == 0):
89 # Consume the block of blank lines
89 # Consume the block of blank lines
90 btype = '~'
90 btype = '~'
91 while i1 < e1 and lines1[i1] == 0:
91 while i1 < e1 and lines1[i1] == 0:
92 i1 += 1
92 i1 += 1
93 while i2 < e2 and lines2[i2] == 0:
93 while i2 < e2 and lines2[i2] == 0:
94 i2 += 1
94 i2 += 1
95 else:
95 else:
96 # Consume the matching lines
96 # Consume the matching lines
97 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
97 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
98 i1 += 1
98 i1 += 1
99 i2 += 1
99 i2 += 1
100 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
100 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
101 s1 = i1
101 s1 = i1
102 s2 = i2
102 s2 = i2
103
103
104 def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
104 def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
105 """Return (block, type) tuples, where block is an mdiff.blocks
105 """Return (block, type) tuples, where block is an mdiff.blocks
106 line entry. type is '=' for blocks matching exactly one another
106 line entry. type is '=' for blocks matching exactly one another
107 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
107 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
108 matching only after having filtered blank lines. If refine is True,
108 matching only after having filtered blank lines. If refine is True,
109 then '~' blocks are refined and are only made of blank lines.
109 then '~' blocks are refined and are only made of blank lines.
110 lines1 and lines2 are text1 and text2 split with splitnewlines(),
110 lines1 and lines2 are text1 and text2 split with splitnewlines(),
111 and may be passed in if they are already available.
111 and may be passed in if they are already available.
112 """
112 """
113 if opts is None:
113 if opts is None:
114 opts = defaultopts
114 opts = defaultopts
115 if opts.ignorews or opts.ignorewsamount:
115 if opts.ignorews or opts.ignorewsamount:
116 text1 = wsclean(opts, text1, False)
116 text1 = wsclean(opts, text1, False)
117 text2 = wsclean(opts, text2, False)
117 text2 = wsclean(opts, text2, False)
118 diff = bdiff.blocks(text1, text2)
118 diff = bdiff.blocks(text1, text2)
119 for i, s1 in enumerate(diff):
119 for i, s1 in enumerate(diff):
120 # The first match is special.
120 # The first match is special.
121 # we've either found a match starting at line 0 or a match later
121 # we've either found a match starting at line 0 or a match later
122 # in the file. If it starts later, old and new below will both be
122 # in the file. If it starts later, old and new below will both be
123 # empty and we'll continue to the next match.
123 # empty and we'll continue to the next match.
124 if i > 0:
124 if i > 0:
125 s = diff[i - 1]
125 s = diff[i - 1]
126 else:
126 else:
127 s = [0, 0, 0, 0]
127 s = [0, 0, 0, 0]
128 s = [s[1], s1[0], s[3], s1[2]]
128 s = [s[1], s1[0], s[3], s1[2]]
129
129
130 # bdiff sometimes gives huge matches past eof; this check eats them,
130 # bdiff sometimes gives huge matches past eof; this check eats them,
131 # and deals with the special first match case described above
131 # and deals with the special first match case described above
132 if s[0] != s[1] or s[2] != s[3]:
132 if s[0] != s[1] or s[2] != s[3]:
133 type = '!'
133 type = '!'
134 if opts.ignoreblanklines:
134 if opts.ignoreblanklines:
135 if lines1 is None:
135 if lines1 is None:
136 lines1 = splitnewlines(text1)
136 lines1 = splitnewlines(text1)
137 if lines2 is None:
137 if lines2 is None:
138 lines2 = splitnewlines(text2)
138 lines2 = splitnewlines(text2)
139 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
139 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
140 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
140 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
141 if old == new:
141 if old == new:
142 type = '~'
142 type = '~'
143 yield s, type
143 yield s, type
144 yield s1, '='
144 yield s1, '='
145
145
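An output sketch for allblocks() (illustrative; exact tuple/list types come from bdiff.blocks): each yielded block is an [a1, a2, b1, b2] half-open line range plus a type flag, ending with bdiff's zero-length sentinel block.

t1, t2 = "a\nb\nc\n", "a\nx\nc\n"
for block, btype in allblocks(t1, t2):
    print btype, block
# -> roughly: '=' (0, 1, 0, 1)   matching 'a'
#             '!' [1, 2, 1, 2]   'b' replaced by 'x'
#             '=' (2, 3, 2, 3)   matching 'c'
#             '=' (3, 3, 3, 3)   empty sentinel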
146 def diffline(revs, a, b, opts):
146 def diffline(revs, a, b, opts):
147 parts = ['diff']
147 parts = ['diff']
148 if opts.git:
148 if opts.git:
149 parts.append('--git')
149 parts.append('--git')
150 if revs and not opts.git:
150 if revs and not opts.git:
151 parts.append(' '.join(["-r %s" % rev for rev in revs]))
151 parts.append(' '.join(["-r %s" % rev for rev in revs]))
152 if opts.git:
152 if opts.git:
153 parts.append('a/%s' % a)
153 parts.append('a/%s' % a)
154 parts.append('b/%s' % b)
154 parts.append('b/%s' % b)
155 else:
155 else:
156 parts.append(a)
156 parts.append(a)
157 return ' '.join(parts) + '\n'
157 return ' '.join(parts) + '\n'
158
158
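An output sketch for diffline() with placeholder revision hashes, since this changeset is about unifying its callers: git mode drops the -r revisions and uses a/ b/ prefixes instead.

o = defaultopts
diffline(['12abc34', '56def78'], 'foo.c', 'foo.c', o)
# -> 'diff -r 12abc34 -r 56def78 foo.c\n'
diffline(None, 'foo.c', 'foo.c', diffopts(git=True))
# -> 'diff --git a/foo.c b/foo.c\n'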
159 def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
159 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
160 def datetag(date, fn=None):
160 def datetag(date, fn=None):
161 if not opts.git and not opts.nodates:
161 if not opts.git and not opts.nodates:
162 return '\t%s\n' % date
162 return '\t%s\n' % date
163 if fn and ' ' in fn:
163 if fn and ' ' in fn:
164 return '\t\n'
164 return '\t\n'
165 return '\n'
165 return '\n'
166
166
167 if not a and not b:
167 if not a and not b:
168 return ""
168 return ""
169 epoch = util.datestr((0, 0))
169 epoch = util.datestr((0, 0))
170
170
171 fn1 = util.pconvert(fn1)
171 fn1 = util.pconvert(fn1)
172 fn2 = util.pconvert(fn2)
172 fn2 = util.pconvert(fn2)
173
173
174 if not opts.text and (util.binary(a) or util.binary(b)):
174 if not opts.text and (util.binary(a) or util.binary(b)):
175 if a and b and len(a) == len(b) and a == b:
175 if a and b and len(a) == len(b) and a == b:
176 return ""
176 return ""
177 l = ['Binary file %s has changed\n' % fn1]
177 l = ['Binary file %s has changed\n' % fn1]
178 elif not a:
178 elif not a:
179 b = splitnewlines(b)
179 b = splitnewlines(b)
180 if a is None:
180 if a is None:
181 l1 = '--- /dev/null%s' % datetag(epoch)
181 l1 = '--- /dev/null%s' % datetag(epoch)
182 else:
182 else:
183 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
183 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
184 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
184 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
185 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
185 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
186 l = [l1, l2, l3] + ["+" + e for e in b]
186 l = [l1, l2, l3] + ["+" + e for e in b]
187 elif not b:
187 elif not b:
188 a = splitnewlines(a)
188 a = splitnewlines(a)
189 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
189 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
190 if b is None:
190 if b is None:
191 l2 = '+++ /dev/null%s' % datetag(epoch)
191 l2 = '+++ /dev/null%s' % datetag(epoch)
192 else:
192 else:
193 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
193 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
194 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
194 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
195 l = [l1, l2, l3] + ["-" + e for e in a]
195 l = [l1, l2, l3] + ["-" + e for e in a]
196 else:
196 else:
197 al = splitnewlines(a)
197 al = splitnewlines(a)
198 bl = splitnewlines(b)
198 bl = splitnewlines(b)
199 l = list(_unidiff(a, b, al, bl, opts=opts))
199 l = list(_unidiff(a, b, al, bl, opts=opts))
200 if not l:
200 if not l:
201 return ""
201 return ""
202
202
203 l.insert(0, "--- a/%s%s" % (fn1, datetag(ad, fn1)))
203 l.insert(0, "--- a/%s%s" % (fn1, datetag(ad, fn1)))
204 l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd, fn2)))
204 l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd, fn2)))
205
205
206 for ln in xrange(len(l)):
206 for ln in xrange(len(l)):
207 if l[ln][-1] != '\n':
207 if l[ln][-1] != '\n':
208 l[ln] += "\n\ No newline at end of file\n"
208 l[ln] += "\n\ No newline at end of file\n"
209
209
210 if r:
211 l.insert(0, diffline(r, fn1, fn2, opts))
212
213 return "".join(l)
210 return "".join(l)
214
211
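A caller-side sketch for the new signature (illustrative; "rev1"/"rev2" and the date strings are placeholders): with the 'r' parameter gone, the 'diff ...' header produced by diffline() is prepended by the caller rather than inside unidiff().

o = defaultopts
body = unidiff("a\nb\n", "date1", "a\nc\n", "date2", "f", "f", opts=o)
full = diffline(["rev1", "rev2"], "f", "f", o) + body   # header added by caller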
215 # creates a headerless unified diff
212 # creates a headerless unified diff
216 # t1 and t2 are the text to be diffed
213 # t1 and t2 are the text to be diffed
217 # l1 and l2 are the text broken up into lines
214 # l1 and l2 are the text broken up into lines
218 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
215 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
219 def contextend(l, len):
216 def contextend(l, len):
220 ret = l + opts.context
217 ret = l + opts.context
221 if ret > len:
218 if ret > len:
222 ret = len
219 ret = len
223 return ret
220 return ret
224
221
225 def contextstart(l):
222 def contextstart(l):
226 ret = l - opts.context
223 ret = l - opts.context
227 if ret < 0:
224 if ret < 0:
228 return 0
225 return 0
229 return ret
226 return ret
230
227
231 lastfunc = [0, '']
228 lastfunc = [0, '']
232 def yieldhunk(hunk):
229 def yieldhunk(hunk):
233 (astart, a2, bstart, b2, delta) = hunk
230 (astart, a2, bstart, b2, delta) = hunk
234 aend = contextend(a2, len(l1))
231 aend = contextend(a2, len(l1))
235 alen = aend - astart
232 alen = aend - astart
236 blen = b2 - bstart + aend - a2
233 blen = b2 - bstart + aend - a2
237
234
238 func = ""
235 func = ""
239 if opts.showfunc:
236 if opts.showfunc:
240 lastpos, func = lastfunc
237 lastpos, func = lastfunc
241 # walk backwards from the start of the context up to the start of
238 # walk backwards from the start of the context up to the start of
242 # the previous hunk context until we find a line starting with an
239 # the previous hunk context until we find a line starting with an
243 # alphanumeric char.
240 # alphanumeric char.
244 for i in xrange(astart - 1, lastpos - 1, -1):
241 for i in xrange(astart - 1, lastpos - 1, -1):
245 if l1[i][0].isalnum():
242 if l1[i][0].isalnum():
246 func = ' ' + l1[i].rstrip()[:40]
243 func = ' ' + l1[i].rstrip()[:40]
247 lastfunc[1] = func
244 lastfunc[1] = func
248 break
245 break
249 # by recording this hunk's starting point as the next place to
246 # by recording this hunk's starting point as the next place to
250 # start looking for function lines, we avoid reading any line in
247 # start looking for function lines, we avoid reading any line in
251 # the file more than once.
248 # the file more than once.
252 lastfunc[0] = astart
249 lastfunc[0] = astart
253
250
254 # zero-length hunk ranges report their start line as one less
251 # zero-length hunk ranges report their start line as one less
255 if alen:
252 if alen:
256 astart += 1
253 astart += 1
257 if blen:
254 if blen:
258 bstart += 1
255 bstart += 1
259
256
260 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
257 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
261 bstart, blen, func)
258 bstart, blen, func)
262 for x in delta:
259 for x in delta:
263 yield x
260 yield x
264 for x in xrange(a2, aend):
261 for x in xrange(a2, aend):
265 yield ' ' + l1[x]
262 yield ' ' + l1[x]
266
263
267 # bdiff.blocks gives us the matching sequences in the files. The loop
264 # bdiff.blocks gives us the matching sequences in the files. The loop
268 # below finds the spaces between those matching sequences and translates
265 # below finds the spaces between those matching sequences and translates
269 # them into diff output.
266 # them into diff output.
270 #
267 #
271 hunk = None
268 hunk = None
272 ignoredlines = 0
269 ignoredlines = 0
273 for s, stype in allblocks(t1, t2, opts, l1, l2):
270 for s, stype in allblocks(t1, t2, opts, l1, l2):
274 a1, a2, b1, b2 = s
271 a1, a2, b1, b2 = s
275 if stype != '!':
272 if stype != '!':
276 if stype == '~':
273 if stype == '~':
277 # The diff context lines are based on t1 content. When
274 # The diff context lines are based on t1 content. When
278 # blank lines are ignored, the new lines offsets must
275 # blank lines are ignored, the new lines offsets must
279 # be adjusted as if equivalent blocks ('~') had the
276 # be adjusted as if equivalent blocks ('~') had the
280 # same sizes on both sides.
277 # same sizes on both sides.
281 ignoredlines += (b2 - b1) - (a2 - a1)
278 ignoredlines += (b2 - b1) - (a2 - a1)
282 continue
279 continue
283 delta = []
280 delta = []
284 old = l1[a1:a2]
281 old = l1[a1:a2]
285 new = l2[b1:b2]
282 new = l2[b1:b2]
286
283
287 b1 -= ignoredlines
284 b1 -= ignoredlines
288 b2 -= ignoredlines
285 b2 -= ignoredlines
289 astart = contextstart(a1)
286 astart = contextstart(a1)
290 bstart = contextstart(b1)
287 bstart = contextstart(b1)
291 prev = None
288 prev = None
292 if hunk:
289 if hunk:
293 # join with the previous hunk if it falls inside the context
290 # join with the previous hunk if it falls inside the context
294 if astart < hunk[1] + opts.context + 1:
291 if astart < hunk[1] + opts.context + 1:
295 prev = hunk
292 prev = hunk
296 astart = hunk[1]
293 astart = hunk[1]
297 bstart = hunk[3]
294 bstart = hunk[3]
298 else:
295 else:
299 for x in yieldhunk(hunk):
296 for x in yieldhunk(hunk):
300 yield x
297 yield x
301 if prev:
298 if prev:
302 # we've joined the previous hunk, record the new ending points.
299 # we've joined the previous hunk, record the new ending points.
303 hunk[1] = a2
300 hunk[1] = a2
304 hunk[3] = b2
301 hunk[3] = b2
305 delta = hunk[4]
302 delta = hunk[4]
306 else:
303 else:
307 # create a new hunk
304 # create a new hunk
308 hunk = [astart, a2, bstart, b2, delta]
305 hunk = [astart, a2, bstart, b2, delta]
309
306
310 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
307 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
311 delta[len(delta):] = ['-' + x for x in old]
308 delta[len(delta):] = ['-' + x for x in old]
312 delta[len(delta):] = ['+' + x for x in new]
309 delta[len(delta):] = ['+' + x for x in new]
313
310
314 if hunk:
311 if hunk:
315 for x in yieldhunk(hunk):
312 for x in yieldhunk(hunk):
316 yield x
313 yield x
317
314
318 def b85diff(to, tn):
315 def b85diff(to, tn):
319 '''print base85-encoded binary diff'''
316 '''print base85-encoded binary diff'''
320 def gitindex(text):
317 def gitindex(text):
321 if not text:
318 if not text:
322 return hex(nullid)
319 return hex(nullid)
323 l = len(text)
320 l = len(text)
324 s = util.sha1('blob %d\0' % l)
321 s = util.sha1('blob %d\0' % l)
325 s.update(text)
322 s.update(text)
326 return s.hexdigest()
323 return s.hexdigest()
327
324
328 def fmtline(line):
325 def fmtline(line):
329 l = len(line)
326 l = len(line)
330 if l <= 26:
327 if l <= 26:
331 l = chr(ord('A') + l - 1)
328 l = chr(ord('A') + l - 1)
332 else:
329 else:
333 l = chr(l - 26 + ord('a') - 1)
330 l = chr(l - 26 + ord('a') - 1)
334 return '%c%s\n' % (l, base85.b85encode(line, True))
331 return '%c%s\n' % (l, base85.b85encode(line, True))
335
332
336 def chunk(text, csize=52):
333 def chunk(text, csize=52):
337 l = len(text)
334 l = len(text)
338 i = 0
335 i = 0
339 while i < l:
336 while i < l:
340 yield text[i:i + csize]
337 yield text[i:i + csize]
341 i += csize
338 i += csize
342
339
343 tohash = gitindex(to)
340 tohash = gitindex(to)
344 tnhash = gitindex(tn)
341 tnhash = gitindex(tn)
345 if tohash == tnhash:
342 if tohash == tnhash:
346 return ""
343 return ""
347
344
348 # TODO: deltas
345 # TODO: deltas
349 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
346 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
350 (tohash, tnhash, len(tn))]
347 (tohash, tnhash, len(tn))]
351 for l in chunk(zlib.compress(tn)):
348 for l in chunk(zlib.compress(tn)):
352 ret.append(fmtline(l))
349 ret.append(fmtline(l))
353 ret.append('\n')
350 ret.append('\n')
354 return ''.join(ret)
351 return ''.join(ret)
355
352
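A small arithmetic sketch of the length prefix used by fmtline() above (chunks hold at most 52 bytes of compressed data): lengths 1-26 map to 'A'-'Z' and 27-52 to 'a'-'z', as in git binary patches.

assert chr(ord('A') + 26 - 1) == 'Z'        # a full 26-byte chunk
assert chr(52 - 26 + ord('a') - 1) == 'z'   # a full 52-byte chunk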
356 def patchtext(bin):
353 def patchtext(bin):
357 pos = 0
354 pos = 0
358 t = []
355 t = []
359 while pos < len(bin):
356 while pos < len(bin):
360 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
357 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
361 pos += 12
358 pos += 12
362 t.append(bin[pos:pos + l])
359 t.append(bin[pos:pos + l])
363 pos += l
360 pos += l
364 return "".join(t)
361 return "".join(t)
365
362
366 def patch(a, bin):
363 def patch(a, bin):
367 if len(a) == 0:
364 if len(a) == 0:
368 # skip over trivial delta header
365 # skip over trivial delta header
369 return util.buffer(bin, 12)
366 return util.buffer(bin, 12)
370 return mpatch.patches(a, [bin])
367 return mpatch.patches(a, [bin])
371
368
372 # similar to difflib.SequenceMatcher.get_matching_blocks
369 # similar to difflib.SequenceMatcher.get_matching_blocks
373 def get_matching_blocks(a, b):
370 def get_matching_blocks(a, b):
374 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
371 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
375
372
376 def trivialdiffheader(length):
373 def trivialdiffheader(length):
377 return struct.pack(">lll", 0, 0, length)
374 return struct.pack(">lll", 0, 0, length)
378
375
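A sketch of the binary delta format shared by patchtext(), patch() and trivialdiffheader() (illustrative): a delta is a sequence of 12-byte big-endian ">lll" headers, each meaning "replace base[p1:p2] with the next l bytes", followed by that data.

new = "hello\n"
delta = trivialdiffheader(len(new)) + new      # replace base[0:0] with the text
assert patchtext(delta) == new
assert str(patch("", delta)) == new            # empty base: skip the trivial header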
379 patches = mpatch.patches
376 patches = mpatch.patches
380 patchedsize = mpatch.patchedsize
377 patchedsize = mpatch.patchedsize
381 textdiff = bdiff.bdiff
378 textdiff = bdiff.bdiff
@@ -1,1861 +1,1861
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, short
13 from node import hex, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
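A usage sketch for split() (illustrative; 'incoming.mbox' is a hypothetical file): each returned chunk is a file-like object holding one patch.

fp = open('incoming.mbox')
for chunk in split(fp):
    print chunk.readline().rstrip()    # e.g. the first line of each patch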
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[9:].lstrip())
233 parents.append(line[9:].lstrip())
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except: # re-raises
248 except: # re-raises
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is the patched file path. 'oldpath' is set to the
267 or COPY. 'path' is the patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def _ispatchinga(self, afile):
293 def _ispatchinga(self, afile):
294 if afile == '/dev/null':
294 if afile == '/dev/null':
295 return self.op == 'ADD'
295 return self.op == 'ADD'
296 return afile == 'a/' + (self.oldpath or self.path)
296 return afile == 'a/' + (self.oldpath or self.path)
297
297
298 def _ispatchingb(self, bfile):
298 def _ispatchingb(self, bfile):
299 if bfile == '/dev/null':
299 if bfile == '/dev/null':
300 return self.op == 'DELETE'
300 return self.op == 'DELETE'
301 return bfile == 'b/' + self.path
301 return bfile == 'b/' + self.path
302
302
303 def ispatching(self, afile, bfile):
303 def ispatching(self, afile, bfile):
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
305
305
306 def __repr__(self):
306 def __repr__(self):
307 return "<patchmeta %s %r>" % (self.op, self.path)
307 return "<patchmeta %s %r>" % (self.op, self.path)
308
308
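A mode sketch for patchmeta.setmode() (Python 2 octal literals, matching the masks above): 020000 is the symlink bit, 0100 the owner-execute bit.

gp = patchmeta('bin/tool')
gp.setmode(0100755)                    # regular executable file
islink, isexec = gp.mode
assert not islink and isexec
gp.setmode(0120000)                    # symbolic link
assert gp.mode[0] and not gp.mode[1]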
309 def readgitpatch(lr):
309 def readgitpatch(lr):
310 """extract git-style metadata about patches from <patchname>"""
310 """extract git-style metadata about patches from <patchname>"""
311
311
312 # Filter patch for git information
312 # Filter patch for git information
313 gp = None
313 gp = None
314 gitpatches = []
314 gitpatches = []
315 for line in lr:
315 for line in lr:
316 line = line.rstrip(' \r\n')
316 line = line.rstrip(' \r\n')
317 if line.startswith('diff --git'):
317 if line.startswith('diff --git'):
318 m = gitre.match(line)
318 m = gitre.match(line)
319 if m:
319 if m:
320 if gp:
320 if gp:
321 gitpatches.append(gp)
321 gitpatches.append(gp)
322 dst = m.group(2)
322 dst = m.group(2)
323 gp = patchmeta(dst)
323 gp = patchmeta(dst)
324 elif gp:
324 elif gp:
325 if line.startswith('--- '):
325 if line.startswith('--- '):
326 gitpatches.append(gp)
326 gitpatches.append(gp)
327 gp = None
327 gp = None
328 continue
328 continue
329 if line.startswith('rename from '):
329 if line.startswith('rename from '):
330 gp.op = 'RENAME'
330 gp.op = 'RENAME'
331 gp.oldpath = line[12:]
331 gp.oldpath = line[12:]
332 elif line.startswith('rename to '):
332 elif line.startswith('rename to '):
333 gp.path = line[10:]
333 gp.path = line[10:]
334 elif line.startswith('copy from '):
334 elif line.startswith('copy from '):
335 gp.op = 'COPY'
335 gp.op = 'COPY'
336 gp.oldpath = line[10:]
336 gp.oldpath = line[10:]
337 elif line.startswith('copy to '):
337 elif line.startswith('copy to '):
338 gp.path = line[8:]
338 gp.path = line[8:]
339 elif line.startswith('deleted file'):
339 elif line.startswith('deleted file'):
340 gp.op = 'DELETE'
340 gp.op = 'DELETE'
341 elif line.startswith('new file mode '):
341 elif line.startswith('new file mode '):
342 gp.op = 'ADD'
342 gp.op = 'ADD'
343 gp.setmode(int(line[-6:], 8))
343 gp.setmode(int(line[-6:], 8))
344 elif line.startswith('new mode '):
344 elif line.startswith('new mode '):
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('GIT binary patch'):
346 elif line.startswith('GIT binary patch'):
347 gp.binary = True
347 gp.binary = True
348 if gp:
348 if gp:
349 gitpatches.append(gp)
349 gitpatches.append(gp)
350
350
351 return gitpatches
351 return gitpatches
352
352
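A sketch of readgitpatch() on a minimal git-style header (illustrative; any iterable of lines works as the reader):

lines = ['diff --git a/old.txt b/new.txt\n',
         'rename from old.txt\n',
         'rename to new.txt\n']
gp = readgitpatch(lines)[0]
assert gp.op == 'RENAME' and gp.oldpath == 'old.txt' and gp.path == 'new.txt'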
353 class linereader(object):
353 class linereader(object):
354 # simple class to allow pushing lines back into the input stream
354 # simple class to allow pushing lines back into the input stream
355 def __init__(self, fp):
355 def __init__(self, fp):
356 self.fp = fp
356 self.fp = fp
357 self.buf = []
357 self.buf = []
358
358
359 def push(self, line):
359 def push(self, line):
360 if line is not None:
360 if line is not None:
361 self.buf.append(line)
361 self.buf.append(line)
362
362
363 def readline(self):
363 def readline(self):
364 if self.buf:
364 if self.buf:
365 l = self.buf[0]
365 l = self.buf[0]
366 del self.buf[0]
366 del self.buf[0]
367 return l
367 return l
368 return self.fp.readline()
368 return self.fp.readline()
369
369
370 def __iter__(self):
370 def __iter__(self):
371 while True:
371 while True:
372 l = self.readline()
372 l = self.readline()
373 if not l:
373 if not l:
374 break
374 break
375 yield l
375 yield l
376
376
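A push-back sketch for linereader (illustrative): a line that has been read can be handed back and will be returned first on the next readline().

lr = linereader(cStringIO.StringIO('one\ntwo\n'))
first = lr.readline()                       # 'one\n'
lr.push(first)                              # give it back
assert lr.readline() == 'one\n' and lr.readline() == 'two\n'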
377 class abstractbackend(object):
377 class abstractbackend(object):
378 def __init__(self, ui):
378 def __init__(self, ui):
379 self.ui = ui
379 self.ui = ui
380
380
381 def getfile(self, fname):
381 def getfile(self, fname):
382 """Return target file data and flags as a (data, (islink,
382 """Return target file data and flags as a (data, (islink,
383 isexec)) tuple.
383 isexec)) tuple.
384 """
384 """
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 def setfile(self, fname, data, mode, copysource):
387 def setfile(self, fname, data, mode, copysource):
388 """Write data to target file fname and set its mode. mode is a
388 """Write data to target file fname and set its mode. mode is a
389 (islink, isexec) tuple. If data is None, the file content should
389 (islink, isexec) tuple. If data is None, the file content should
390 be left unchanged. If the file is modified after being copied,
390 be left unchanged. If the file is modified after being copied,
391 copysource is set to the original file name.
391 copysource is set to the original file name.
392 """
392 """
393 raise NotImplementedError
393 raise NotImplementedError
394
394
395 def unlink(self, fname):
395 def unlink(self, fname):
396 """Unlink target file."""
396 """Unlink target file."""
397 raise NotImplementedError
397 raise NotImplementedError
398
398
399 def writerej(self, fname, failed, total, lines):
399 def writerej(self, fname, failed, total, lines):
400 """Write rejected lines for fname. total is the number of hunks
400 """Write rejected lines for fname. total is the number of hunks
401 which failed to apply and total the total number of hunks for this
401 which failed to apply and total the total number of hunks for this
402 files.
402 files.
403 """
403 """
404 pass
404 pass
405
405
406 def exists(self, fname):
406 def exists(self, fname):
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 class fsbackend(abstractbackend):
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, basedir):
410 def __init__(self, ui, basedir):
411 super(fsbackend, self).__init__(ui)
411 super(fsbackend, self).__init__(ui)
412 self.opener = scmutil.opener(basedir)
412 self.opener = scmutil.opener(basedir)
413
413
414 def _join(self, f):
414 def _join(self, f):
415 return os.path.join(self.opener.base, f)
415 return os.path.join(self.opener.base, f)
416
416
417 def getfile(self, fname):
417 def getfile(self, fname):
418 path = self._join(fname)
418 path = self._join(fname)
419 if os.path.islink(path):
419 if os.path.islink(path):
420 return (os.readlink(path), (True, False))
420 return (os.readlink(path), (True, False))
421 isexec = False
421 isexec = False
422 try:
422 try:
423 isexec = os.lstat(path).st_mode & 0100 != 0
423 isexec = os.lstat(path).st_mode & 0100 != 0
424 except OSError, e:
424 except OSError, e:
425 if e.errno != errno.ENOENT:
425 if e.errno != errno.ENOENT:
426 raise
426 raise
427 return (self.opener.read(fname), (False, isexec))
427 return (self.opener.read(fname), (False, isexec))
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 islink, isexec = mode
430 islink, isexec = mode
431 if data is None:
431 if data is None:
432 util.setflags(self._join(fname), islink, isexec)
432 util.setflags(self._join(fname), islink, isexec)
433 return
433 return
434 if islink:
434 if islink:
435 self.opener.symlink(data, fname)
435 self.opener.symlink(data, fname)
436 else:
436 else:
437 self.opener.write(fname, data)
437 self.opener.write(fname, data)
438 if isexec:
438 if isexec:
439 util.setflags(self._join(fname), False, True)
439 util.setflags(self._join(fname), False, True)
440
440
441 def unlink(self, fname):
441 def unlink(self, fname):
442 try:
442 try:
443 util.unlinkpath(self._join(fname))
443 util.unlinkpath(self._join(fname))
444 except OSError, inst:
444 except OSError, inst:
445 if inst.errno != errno.ENOENT:
445 if inst.errno != errno.ENOENT:
446 raise
446 raise
447
447
448 def writerej(self, fname, failed, total, lines):
448 def writerej(self, fname, failed, total, lines):
449 fname = fname + ".rej"
449 fname = fname + ".rej"
450 self.ui.warn(
450 self.ui.warn(
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
452 (failed, total, fname))
452 (failed, total, fname))
453 fp = self.opener(fname, 'w')
453 fp = self.opener(fname, 'w')
454 fp.writelines(lines)
454 fp.writelines(lines)
455 fp.close()
455 fp.close()
456
456
457 def exists(self, fname):
457 def exists(self, fname):
458 return os.path.lexists(self._join(fname))
458 return os.path.lexists(self._join(fname))
459
459
460 class workingbackend(fsbackend):
460 class workingbackend(fsbackend):
461 def __init__(self, ui, repo, similarity):
461 def __init__(self, ui, repo, similarity):
462 super(workingbackend, self).__init__(ui, repo.root)
462 super(workingbackend, self).__init__(ui, repo.root)
463 self.repo = repo
463 self.repo = repo
464 self.similarity = similarity
464 self.similarity = similarity
465 self.removed = set()
465 self.removed = set()
466 self.changed = set()
466 self.changed = set()
467 self.copied = []
467 self.copied = []
468
468
469 def _checkknown(self, fname):
469 def _checkknown(self, fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
472
472
473 def setfile(self, fname, data, mode, copysource):
473 def setfile(self, fname, data, mode, copysource):
474 self._checkknown(fname)
474 self._checkknown(fname)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
476 if copysource is not None:
476 if copysource is not None:
477 self.copied.append((copysource, fname))
477 self.copied.append((copysource, fname))
478 self.changed.add(fname)
478 self.changed.add(fname)
479
479
480 def unlink(self, fname):
480 def unlink(self, fname):
481 self._checkknown(fname)
481 self._checkknown(fname)
482 super(workingbackend, self).unlink(fname)
482 super(workingbackend, self).unlink(fname)
483 self.removed.add(fname)
483 self.removed.add(fname)
484 self.changed.add(fname)
484 self.changed.add(fname)
485
485
486 def close(self):
486 def close(self):
487 wctx = self.repo[None]
487 wctx = self.repo[None]
488 addremoved = set(self.changed)
488 addremoved = set(self.changed)
489 for src, dst in self.copied:
489 for src, dst in self.copied:
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
491 if self.removed:
491 if self.removed:
492 wctx.forget(sorted(self.removed))
492 wctx.forget(sorted(self.removed))
493 for f in self.removed:
493 for f in self.removed:
494 if f not in self.repo.dirstate:
494 if f not in self.repo.dirstate:
495 # File was deleted and no longer belongs to the
495 # File was deleted and no longer belongs to the
496 # dirstate, it was probably marked added then
496 # dirstate, it was probably marked added then
497 # deleted, and should not be considered by
497 # deleted, and should not be considered by
498 # addremove().
498 # addremove().
499 addremoved.discard(f)
499 addremoved.discard(f)
500 if addremoved:
500 if addremoved:
501 cwd = self.repo.getcwd()
501 cwd = self.repo.getcwd()
502 if cwd:
502 if cwd:
503 addremoved = [util.pathto(self.repo.root, cwd, f)
503 addremoved = [util.pathto(self.repo.root, cwd, f)
504 for f in addremoved]
504 for f in addremoved]
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 raise IOError
537 raise IOError
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
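A spill sketch for filestore (illustrative; the tiny maxsize just forces the on-disk path): small files stay in memory, anything that would exceed maxsize goes to a temporary directory that close() removes.

store = filestore(maxsize=4)
store.setfile('a', 'hi', (False, False))          # 2 bytes, kept in memory
store.setfile('b', 'spill me', (False, False))    # would exceed 4 bytes, written to disk
data, mode, copied = store.getfile('b')
assert data == 'spill me'
store.close()                                     # removes the temp directory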
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 raise IOError
563 raise IOError
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
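A matching sketch for the unidesc pattern above: the four numeric fields of a unified hunk header are captured, and omitted lengths come back as None (meaning 1).

m = unidesc.match('@@ -10,3 +12,4 @@ def foo():\n')
assert m.groups() == ('10', '3', '12', '4')
m = unidesc.match('@@ -1 +1 @@\n')        # one-line hunks omit the lengths
assert m.groups() == ('1', None, '1', None)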
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 try:
606 try:
607 if self.copysource is None:
607 if self.copysource is None:
608 data, mode = backend.getfile(self.fname)
608 data, mode = backend.getfile(self.fname)
609 self.exists = True
609 self.exists = True
610 else:
610 else:
611 data, mode = store.getfile(self.copysource)[:2]
611 data, mode = store.getfile(self.copysource)[:2]
612 self.exists = backend.exists(self.fname)
612 self.exists = backend.exists(self.fname)
613 self.missing = False
613 self.missing = False
614 if data:
614 if data:
615 self.lines = mdiff.splitnewlines(data)
615 self.lines = mdiff.splitnewlines(data)
616 if self.mode is None:
616 if self.mode is None:
617 self.mode = mode
617 self.mode = mode
618 if self.lines:
618 if self.lines:
619 # Normalize line endings
619 # Normalize line endings
620 if self.lines[0].endswith('\r\n'):
620 if self.lines[0].endswith('\r\n'):
621 self.eol = '\r\n'
621 self.eol = '\r\n'
622 elif self.lines[0].endswith('\n'):
622 elif self.lines[0].endswith('\n'):
623 self.eol = '\n'
623 self.eol = '\n'
624 if eolmode != 'strict':
624 if eolmode != 'strict':
625 nlines = []
625 nlines = []
626 for l in self.lines:
626 for l in self.lines:
627 if l.endswith('\r\n'):
627 if l.endswith('\r\n'):
628 l = l[:-2] + '\n'
628 l = l[:-2] + '\n'
629 nlines.append(l)
629 nlines.append(l)
630 self.lines = nlines
630 self.lines = nlines
631 except IOError:
631 except IOError:
632 if self.create:
632 if self.create:
633 self.missing = False
633 self.missing = False
634 if self.mode is None:
634 if self.mode is None:
635 self.mode = (False, False)
635 self.mode = (False, False)
636 if self.missing:
636 if self.missing:
637 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
638
638
639 self.hash = {}
639 self.hash = {}
640 self.dirty = 0
640 self.dirty = 0
641 self.offset = 0
641 self.offset = 0
642 self.skew = 0
642 self.skew = 0
643 self.rej = []
643 self.rej = []
644 self.fileprinted = False
644 self.fileprinted = False
645 self.printfile(False)
645 self.printfile(False)
646 self.hunks = 0
646 self.hunks = 0
647
647
648 def writelines(self, fname, lines, mode):
648 def writelines(self, fname, lines, mode):
649 if self.eolmode == 'auto':
649 if self.eolmode == 'auto':
650 eol = self.eol
650 eol = self.eol
651 elif self.eolmode == 'crlf':
651 elif self.eolmode == 'crlf':
652 eol = '\r\n'
652 eol = '\r\n'
653 else:
653 else:
654 eol = '\n'
654 eol = '\n'
655
655
656 if self.eolmode != 'strict' and eol and eol != '\n':
656 if self.eolmode != 'strict' and eol and eol != '\n':
657 rawlines = []
657 rawlines = []
658 for l in lines:
658 for l in lines:
659 if l and l[-1] == '\n':
659 if l and l[-1] == '\n':
660 l = l[:-1] + eol
660 l = l[:-1] + eol
661 rawlines.append(l)
661 rawlines.append(l)
662 lines = rawlines
662 lines = rawlines
663
663
664 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
665
665
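# Illustrative sketch (not part of patch.py): the EOL rewrite performed by
# writelines() above.  Lines are kept LF-terminated internally; only when
# eolmode is not 'strict' and the chosen EOL differs from '\n' are the
# endings swapped just before handing the data to the backend.
def _demo_rewrite_eol(lines, eol):
    # mirrors the loop above: replace a trailing '\n' with the chosen eol
    rawlines = []
    for l in lines:
        if l and l[-1] == '\n':
            l = l[:-1] + eol
        rawlines.append(l)
    return rawlines

print(_demo_rewrite_eol(['a\n', 'b\n', 'no newline'], '\r\n'))
# ['a\r\n', 'b\r\n', 'no newline']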
666 def printfile(self, warn):
666 def printfile(self, warn):
667 if self.fileprinted:
667 if self.fileprinted:
668 return
668 return
669 if warn or self.ui.verbose:
669 if warn or self.ui.verbose:
670 self.fileprinted = True
670 self.fileprinted = True
671 s = _("patching file %s\n") % self.fname
671 s = _("patching file %s\n") % self.fname
672 if warn:
672 if warn:
673 self.ui.warn(s)
673 self.ui.warn(s)
674 else:
674 else:
675 self.ui.note(s)
675 self.ui.note(s)
676
676
677
677
678 def findlines(self, l, linenum):
678 def findlines(self, l, linenum):
679 # looks through the hash and finds candidate lines. The
679 # looks through the hash and finds candidate lines. The
680 # result is a list of line numbers sorted based on distance
680 # result is a list of line numbers sorted based on distance
681 # from linenum
681 # from linenum
682
682
683 cand = self.hash.get(l, [])
683 cand = self.hash.get(l, [])
684 if len(cand) > 1:
684 if len(cand) > 1:
685 # re-sort our list of potentials by distance from linenum.
685 # re-sort our list of potentials by distance from linenum.
686 cand.sort(key=lambda x: abs(x - linenum))
686 cand.sort(key=lambda x: abs(x - linenum))
687 return cand
687 return cand
688
688
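# Illustrative sketch (not part of patch.py): findlines() returns the line
# numbers at which a given line occurs, nearest to the expected position
# first, which is the order the fuzzy-apply loop in apply() probes them.
expected = 40
occurrences = [3, 38, 45, 120]          # hypothetical self.hash[l] entry
occurrences.sort(key=lambda x: abs(x - expected))
print(occurrences)                       # [38, 45, 3, 120]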
689 def write_rej(self):
689 def write_rej(self):
690 # our rejects are a little different from patch(1). This always
690 # our rejects are a little different from patch(1). This always
691 # creates rejects in the same form as the original patch. A file
691 # creates rejects in the same form as the original patch. A file
692 # header is inserted so that you can run the reject through patch again
692 # header is inserted so that you can run the reject through patch again
693 # without having to type the filename.
693 # without having to type the filename.
694 if not self.rej:
694 if not self.rej:
695 return
695 return
696 base = os.path.basename(self.fname)
696 base = os.path.basename(self.fname)
697 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 lines = ["--- %s\n+++ %s\n" % (base, base)]
698 for x in self.rej:
698 for x in self.rej:
699 for l in x.hunk:
699 for l in x.hunk:
700 lines.append(l)
700 lines.append(l)
701 if l[-1] != '\n':
701 if l[-1] != '\n':
702 lines.append("\n\ No newline at end of file\n")
702 lines.append("\n\ No newline at end of file\n")
703 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
704
704
705 def apply(self, h):
705 def apply(self, h):
706 if not h.complete():
706 if not h.complete():
707 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
708 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 (h.number, h.desc, len(h.a), h.lena, len(h.b),
709 h.lenb))
709 h.lenb))
710
710
711 self.hunks += 1
711 self.hunks += 1
712
712
713 if self.missing:
713 if self.missing:
714 self.rej.append(h)
714 self.rej.append(h)
715 return -1
715 return -1
716
716
717 if self.exists and self.create:
717 if self.exists and self.create:
718 if self.copysource:
718 if self.copysource:
719 self.ui.warn(_("cannot create %s: destination already "
719 self.ui.warn(_("cannot create %s: destination already "
720 "exists\n") % self.fname)
720 "exists\n") % self.fname)
721 else:
721 else:
722 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.ui.warn(_("file %s already exists\n") % self.fname)
723 self.rej.append(h)
723 self.rej.append(h)
724 return -1
724 return -1
725
725
726 if isinstance(h, binhunk):
726 if isinstance(h, binhunk):
727 if self.remove:
727 if self.remove:
728 self.backend.unlink(self.fname)
728 self.backend.unlink(self.fname)
729 else:
729 else:
730 self.lines[:] = h.new()
730 self.lines[:] = h.new()
731 self.offset += len(h.new())
731 self.offset += len(h.new())
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
760 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, so just
773 # Only adding lines with no or fuzzed context, so just
774 # take the skew into account
774 # take the skew into account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
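# Illustrative sketch (not part of patch.py): the bookkeeping done by
# apply() above on its fast path.  A hunk records where its old lines
# should sit; once an earlier hunk has grown or shrunk the file, 'offset'
# shifts every later hunk by the accumulated difference.  This toy splice
# uses a plain list equality check in place of diffhelpers.testhunk.
def _demo_splice(lines, old, new, start, offset):
    start += offset
    if lines[start:start + len(old)] != old:    # context must match exactly
        return lines, offset, False             # caller would then try fuzz
    lines[start:start + len(old)] = new
    return lines, offset + len(new) - len(old), True

lines = ['a\n', 'b\n', 'c\n', 'd\n']
lines, offset, ok = _demo_splice(lines, ['b\n'], ['b\n', 'B2\n'], 1, 0)
print((ok, offset, lines))
# (True, 1, ['a\n', 'b\n', 'B2\n', 'c\n', 'd\n'])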
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
807 class hunk(object):
807 class hunk(object):
808 def __init__(self, desc, num, lr, context):
808 def __init__(self, desc, num, lr, context):
809 self.number = num
809 self.number = num
810 self.desc = desc
810 self.desc = desc
811 self.hunk = [desc]
811 self.hunk = [desc]
812 self.a = []
812 self.a = []
813 self.b = []
813 self.b = []
814 self.starta = self.lena = None
814 self.starta = self.lena = None
815 self.startb = self.lenb = None
815 self.startb = self.lenb = None
816 if lr is not None:
816 if lr is not None:
817 if context:
817 if context:
818 self.read_context_hunk(lr)
818 self.read_context_hunk(lr)
819 else:
819 else:
820 self.read_unified_hunk(lr)
820 self.read_unified_hunk(lr)
821
821
822 def getnormalized(self):
822 def getnormalized(self):
823 """Return a copy with line endings normalized to LF."""
823 """Return a copy with line endings normalized to LF."""
824
824
825 def normalize(lines):
825 def normalize(lines):
826 nlines = []
826 nlines = []
827 for line in lines:
827 for line in lines:
828 if line.endswith('\r\n'):
828 if line.endswith('\r\n'):
829 line = line[:-2] + '\n'
829 line = line[:-2] + '\n'
830 nlines.append(line)
830 nlines.append(line)
831 return nlines
831 return nlines
832
832
833 # Dummy object, it is rebuilt manually
833 # Dummy object, it is rebuilt manually
834 nh = hunk(self.desc, self.number, None, None)
834 nh = hunk(self.desc, self.number, None, None)
835 nh.number = self.number
835 nh.number = self.number
836 nh.desc = self.desc
836 nh.desc = self.desc
837 nh.hunk = self.hunk
837 nh.hunk = self.hunk
838 nh.a = normalize(self.a)
838 nh.a = normalize(self.a)
839 nh.b = normalize(self.b)
839 nh.b = normalize(self.b)
840 nh.starta = self.starta
840 nh.starta = self.starta
841 nh.startb = self.startb
841 nh.startb = self.startb
842 nh.lena = self.lena
842 nh.lena = self.lena
843 nh.lenb = self.lenb
843 nh.lenb = self.lenb
844 return nh
844 return nh
845
845
846 def read_unified_hunk(self, lr):
846 def read_unified_hunk(self, lr):
847 m = unidesc.match(self.desc)
847 m = unidesc.match(self.desc)
848 if not m:
848 if not m:
849 raise PatchError(_("bad hunk #%d") % self.number)
849 raise PatchError(_("bad hunk #%d") % self.number)
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
851 if self.lena is None:
851 if self.lena is None:
852 self.lena = 1
852 self.lena = 1
853 else:
853 else:
854 self.lena = int(self.lena)
854 self.lena = int(self.lena)
855 if self.lenb is None:
855 if self.lenb is None:
856 self.lenb = 1
856 self.lenb = 1
857 else:
857 else:
858 self.lenb = int(self.lenb)
858 self.lenb = int(self.lenb)
859 self.starta = int(self.starta)
859 self.starta = int(self.starta)
860 self.startb = int(self.startb)
860 self.startb = int(self.startb)
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
862 self.b)
862 self.b)
863 # if we hit eof before finishing out the hunk, the last line will
863 # if we hit eof before finishing out the hunk, the last line will
864 # be zero length. Let's try to fix it up.
864 # be zero length. Let's try to fix it up.
865 while len(self.hunk[-1]) == 0:
865 while len(self.hunk[-1]) == 0:
866 del self.hunk[-1]
866 del self.hunk[-1]
867 del self.a[-1]
867 del self.a[-1]
868 del self.b[-1]
868 del self.b[-1]
869 self.lena -= 1
869 self.lena -= 1
870 self.lenb -= 1
870 self.lenb -= 1
871 self._fixnewline(lr)
871 self._fixnewline(lr)
872
872
873 def read_context_hunk(self, lr):
873 def read_context_hunk(self, lr):
874 self.desc = lr.readline()
874 self.desc = lr.readline()
875 m = contextdesc.match(self.desc)
875 m = contextdesc.match(self.desc)
876 if not m:
876 if not m:
877 raise PatchError(_("bad hunk #%d") % self.number)
877 raise PatchError(_("bad hunk #%d") % self.number)
878 self.starta, aend = m.groups()
878 self.starta, aend = m.groups()
879 self.starta = int(self.starta)
879 self.starta = int(self.starta)
880 if aend is None:
880 if aend is None:
881 aend = self.starta
881 aend = self.starta
882 self.lena = int(aend) - self.starta
882 self.lena = int(aend) - self.starta
883 if self.starta:
883 if self.starta:
884 self.lena += 1
884 self.lena += 1
885 for x in xrange(self.lena):
885 for x in xrange(self.lena):
886 l = lr.readline()
886 l = lr.readline()
887 if l.startswith('---'):
887 if l.startswith('---'):
888 # lines addition, old block is empty
888 # lines addition, old block is empty
889 lr.push(l)
889 lr.push(l)
890 break
890 break
891 s = l[2:]
891 s = l[2:]
892 if l.startswith('- ') or l.startswith('! '):
892 if l.startswith('- ') or l.startswith('! '):
893 u = '-' + s
893 u = '-' + s
894 elif l.startswith(' '):
894 elif l.startswith(' '):
895 u = ' ' + s
895 u = ' ' + s
896 else:
896 else:
897 raise PatchError(_("bad hunk #%d old text line %d") %
897 raise PatchError(_("bad hunk #%d old text line %d") %
898 (self.number, x))
898 (self.number, x))
899 self.a.append(u)
899 self.a.append(u)
900 self.hunk.append(u)
900 self.hunk.append(u)
901
901
902 l = lr.readline()
902 l = lr.readline()
903 if l.startswith('\ '):
903 if l.startswith('\ '):
904 s = self.a[-1][:-1]
904 s = self.a[-1][:-1]
905 self.a[-1] = s
905 self.a[-1] = s
906 self.hunk[-1] = s
906 self.hunk[-1] = s
907 l = lr.readline()
907 l = lr.readline()
908 m = contextdesc.match(l)
908 m = contextdesc.match(l)
909 if not m:
909 if not m:
910 raise PatchError(_("bad hunk #%d") % self.number)
910 raise PatchError(_("bad hunk #%d") % self.number)
911 self.startb, bend = m.groups()
911 self.startb, bend = m.groups()
912 self.startb = int(self.startb)
912 self.startb = int(self.startb)
913 if bend is None:
913 if bend is None:
914 bend = self.startb
914 bend = self.startb
915 self.lenb = int(bend) - self.startb
915 self.lenb = int(bend) - self.startb
916 if self.startb:
916 if self.startb:
917 self.lenb += 1
917 self.lenb += 1
918 hunki = 1
918 hunki = 1
919 for x in xrange(self.lenb):
919 for x in xrange(self.lenb):
920 l = lr.readline()
920 l = lr.readline()
921 if l.startswith('\ '):
921 if l.startswith('\ '):
922 # XXX: the only way to hit this is with an invalid line range.
922 # XXX: the only way to hit this is with an invalid line range.
923 # The no-eol marker is not counted in the line range, but I
923 # The no-eol marker is not counted in the line range, but I
924 # guess there are diff(1) implementations out there which behave differently.
924 # guess there are diff(1) implementations out there which behave differently.
925 s = self.b[-1][:-1]
925 s = self.b[-1][:-1]
926 self.b[-1] = s
926 self.b[-1] = s
927 self.hunk[hunki - 1] = s
927 self.hunk[hunki - 1] = s
928 continue
928 continue
929 if not l:
929 if not l:
930 # line deletions, new block is empty and we hit EOF
930 # line deletions, new block is empty and we hit EOF
931 lr.push(l)
931 lr.push(l)
932 break
932 break
933 s = l[2:]
933 s = l[2:]
934 if l.startswith('+ ') or l.startswith('! '):
934 if l.startswith('+ ') or l.startswith('! '):
935 u = '+' + s
935 u = '+' + s
936 elif l.startswith(' '):
936 elif l.startswith(' '):
937 u = ' ' + s
937 u = ' ' + s
938 elif len(self.b) == 0:
938 elif len(self.b) == 0:
939 # line deletions, new block is empty
939 # line deletions, new block is empty
940 lr.push(l)
940 lr.push(l)
941 break
941 break
942 else:
942 else:
943 raise PatchError(_("bad hunk #%d new text line %d") %
943 raise PatchError(_("bad hunk #%d new text line %d") %
944 (self.number, x))
944 (self.number, x))
945 self.b.append(s)
945 self.b.append(s)
946 while True:
946 while True:
947 if hunki >= len(self.hunk):
947 if hunki >= len(self.hunk):
948 h = ""
948 h = ""
949 else:
949 else:
950 h = self.hunk[hunki]
950 h = self.hunk[hunki]
951 hunki += 1
951 hunki += 1
952 if h == u:
952 if h == u:
953 break
953 break
954 elif h.startswith('-'):
954 elif h.startswith('-'):
955 continue
955 continue
956 else:
956 else:
957 self.hunk.insert(hunki - 1, u)
957 self.hunk.insert(hunki - 1, u)
958 break
958 break
959
959
960 if not self.a:
960 if not self.a:
961 # this happens when lines were only added to the hunk
961 # this happens when lines were only added to the hunk
962 for x in self.hunk:
962 for x in self.hunk:
963 if x.startswith('-') or x.startswith(' '):
963 if x.startswith('-') or x.startswith(' '):
964 self.a.append(x)
964 self.a.append(x)
965 if not self.b:
965 if not self.b:
966 # this happens when lines were only deleted from the hunk
966 # this happens when lines were only deleted from the hunk
967 for x in self.hunk:
967 for x in self.hunk:
968 if x.startswith('+') or x.startswith(' '):
968 if x.startswith('+') or x.startswith(' '):
969 self.b.append(x[1:])
969 self.b.append(x[1:])
970 # @@ -start,len +start,len @@
970 # @@ -start,len +start,len @@
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
972 self.startb, self.lenb)
972 self.startb, self.lenb)
973 self.hunk[0] = self.desc
973 self.hunk[0] = self.desc
974 self._fixnewline(lr)
974 self._fixnewline(lr)
975
975
976 def _fixnewline(self, lr):
976 def _fixnewline(self, lr):
977 l = lr.readline()
977 l = lr.readline()
978 if l.startswith('\ '):
978 if l.startswith('\ '):
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
980 else:
980 else:
981 lr.push(l)
981 lr.push(l)
982
982
983 def complete(self):
983 def complete(self):
984 return len(self.a) == self.lena and len(self.b) == self.lenb
984 return len(self.a) == self.lena and len(self.b) == self.lenb
985
985
986 def _fuzzit(self, old, new, fuzz, toponly):
986 def _fuzzit(self, old, new, fuzz, toponly):
987 # this removes context lines from the top and bottom of the 'old' and
987 # this removes context lines from the top and bottom of the 'old' and
988 # 'new' lists. It checks the hunk to make sure only context lines are
988 # 'new' lists. It checks the hunk to make sure only context lines are
989 # removed, then returns the shortened lists and the top trim count.
989 # removed, then returns the shortened lists and the top trim count.
990 fuzz = min(fuzz, len(old))
990 fuzz = min(fuzz, len(old))
991 if fuzz:
991 if fuzz:
992 top = 0
992 top = 0
993 bot = 0
993 bot = 0
994 hlen = len(self.hunk)
994 hlen = len(self.hunk)
995 for x in xrange(hlen - 1):
995 for x in xrange(hlen - 1):
996 # the hunk starts with the @@ line, so use x+1
996 # the hunk starts with the @@ line, so use x+1
997 if self.hunk[x + 1][0] == ' ':
997 if self.hunk[x + 1][0] == ' ':
998 top += 1
998 top += 1
999 else:
999 else:
1000 break
1000 break
1001 if not toponly:
1001 if not toponly:
1002 for x in xrange(hlen - 1):
1002 for x in xrange(hlen - 1):
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1004 bot += 1
1004 bot += 1
1005 else:
1005 else:
1006 break
1006 break
1007
1007
1008 bot = min(fuzz, bot)
1008 bot = min(fuzz, bot)
1009 top = min(fuzz, top)
1009 top = min(fuzz, top)
1010 return old[top:len(old)-bot], new[top:len(new)-bot], top
1010 return old[top:len(old)-bot], new[top:len(new)-bot], top
1011 return old, new, 0
1011 return old, new, 0
1012
1012
1013 def fuzzit(self, fuzz, toponly):
1013 def fuzzit(self, fuzz, toponly):
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1015 oldstart = self.starta + top
1015 oldstart = self.starta + top
1016 newstart = self.startb + top
1016 newstart = self.startb + top
1017 # zero length hunk ranges already have their start decremented
1017 # zero length hunk ranges already have their start decremented
1018 if self.lena and oldstart > 0:
1018 if self.lena and oldstart > 0:
1019 oldstart -= 1
1019 oldstart -= 1
1020 if self.lenb and newstart > 0:
1020 if self.lenb and newstart > 0:
1021 newstart -= 1
1021 newstart -= 1
1022 return old, oldstart, new, newstart
1022 return old, oldstart, new, newstart
1023
1023
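# Illustrative sketch (not part of patch.py): the effect of fuzz.  _fuzzit()
# counts how many leading and trailing hunk lines are pure context (they
# start with ' '), caps both counts at 'fuzz', and slices that many lines
# off the old and new sides; fuzzit() then bumps the start line by the
# number of leading lines dropped, and apply() reports "with fuzz N".
# Toy data with one context line at each end; as in the hunk class, the
# a-side keeps its diff prefixes while the b-side is stored bare.
old = [' before\n', '-old text\n', ' after\n']
new = ['before\n', 'new text\n', 'after\n']
fuzz, top, bot = 1, 1, 1                  # both ends are context, capped at fuzz
print(old[top:len(old) - bot])            # ['-old text\n']
print(new[top:len(new) - bot])            # ['new text\n']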
1024 class binhunk(object):
1024 class binhunk(object):
1025 'A binary patch file. Only understands literals so far.'
1025 'A binary patch file. Only understands literals so far.'
1026 def __init__(self, lr, fname):
1026 def __init__(self, lr, fname):
1027 self.text = None
1027 self.text = None
1028 self.hunk = ['GIT binary patch\n']
1028 self.hunk = ['GIT binary patch\n']
1029 self._fname = fname
1029 self._fname = fname
1030 self._read(lr)
1030 self._read(lr)
1031
1031
1032 def complete(self):
1032 def complete(self):
1033 return self.text is not None
1033 return self.text is not None
1034
1034
1035 def new(self):
1035 def new(self):
1036 return [self.text]
1036 return [self.text]
1037
1037
1038 def _read(self, lr):
1038 def _read(self, lr):
1039 def getline(lr, hunk):
1039 def getline(lr, hunk):
1040 l = lr.readline()
1040 l = lr.readline()
1041 hunk.append(l)
1041 hunk.append(l)
1042 return l.rstrip('\r\n')
1042 return l.rstrip('\r\n')
1043
1043
1044 while True:
1044 while True:
1045 line = getline(lr, self.hunk)
1045 line = getline(lr, self.hunk)
1046 if not line:
1046 if not line:
1047 raise PatchError(_('could not extract "%s" binary data')
1047 raise PatchError(_('could not extract "%s" binary data')
1048 % self._fname)
1048 % self._fname)
1049 if line.startswith('literal '):
1049 if line.startswith('literal '):
1050 break
1050 break
1051 size = int(line[8:].rstrip())
1051 size = int(line[8:].rstrip())
1052 dec = []
1052 dec = []
1053 line = getline(lr, self.hunk)
1053 line = getline(lr, self.hunk)
1054 while len(line) > 1:
1054 while len(line) > 1:
1055 l = line[0]
1055 l = line[0]
1056 if l <= 'Z' and l >= 'A':
1056 if l <= 'Z' and l >= 'A':
1057 l = ord(l) - ord('A') + 1
1057 l = ord(l) - ord('A') + 1
1058 else:
1058 else:
1059 l = ord(l) - ord('a') + 27
1059 l = ord(l) - ord('a') + 27
1060 try:
1060 try:
1061 dec.append(base85.b85decode(line[1:])[:l])
1061 dec.append(base85.b85decode(line[1:])[:l])
1062 except ValueError, e:
1062 except ValueError, e:
1063 raise PatchError(_('could not decode "%s" binary patch: %s')
1063 raise PatchError(_('could not decode "%s" binary patch: %s')
1064 % (self._fname, str(e)))
1064 % (self._fname, str(e)))
1065 line = getline(lr, self.hunk)
1065 line = getline(lr, self.hunk)
1066 text = zlib.decompress(''.join(dec))
1066 text = zlib.decompress(''.join(dec))
1067 if len(text) != size:
1067 if len(text) != size:
1068 raise PatchError(_('"%s" length is %d bytes, should be %d')
1068 raise PatchError(_('"%s" length is %d bytes, should be %d')
1069 % (self._fname, len(text), size))
1069 % (self._fname, len(text), size))
1070 self.text = text
1070 self.text = text
1071
1071
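# Illustrative sketch (not part of patch.py): the per-line length prefix of
# a "GIT binary patch" literal hunk, as decoded by _read() above.  Each
# data line starts with one character giving how many real bytes its
# base85 payload carries ('A'-'Z' map to 1-26, 'a'-'z' to 27-52); the
# decoded payload is truncated to that length, and the concatenated chunks
# are zlib-decompressed and checked against the 'literal <size>' header.
def _demo_linelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27

print([_demo_linelen(c) for c in 'AZaz'])   # [1, 26, 27, 52]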
1072 def parsefilename(str):
1072 def parsefilename(str):
1073 # --- filename \t|space stuff
1073 # --- filename \t|space stuff
1074 s = str[4:].rstrip('\r\n')
1074 s = str[4:].rstrip('\r\n')
1075 i = s.find('\t')
1075 i = s.find('\t')
1076 if i < 0:
1076 if i < 0:
1077 i = s.find(' ')
1077 i = s.find(' ')
1078 if i < 0:
1078 if i < 0:
1079 return s
1079 return s
1080 return s[:i]
1080 return s[:i]
1081
1081
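# Illustrative sketch (not part of patch.py): parsefilename() above drops
# the 4-character '--- '/'+++ ' (or '*** ') prefix and keeps everything up
# to the first tab (or, failing that, the first space), which is where
# diff(1) normally appends a timestamp.
header = '--- a/foo.c\t2012-10-10 12:00:00 +0200\n'
s = header[4:].rstrip('\r\n')          # 'a/foo.c\t2012-10-10 12:00:00 +0200'
print(s[:s.find('\t')])                # 'a/foo.c'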
1082 def pathstrip(path, strip):
1082 def pathstrip(path, strip):
1083 pathlen = len(path)
1083 pathlen = len(path)
1084 i = 0
1084 i = 0
1085 if strip == 0:
1085 if strip == 0:
1086 return '', path.rstrip()
1086 return '', path.rstrip()
1087 count = strip
1087 count = strip
1088 while count > 0:
1088 while count > 0:
1089 i = path.find('/', i)
1089 i = path.find('/', i)
1090 if i == -1:
1090 if i == -1:
1091 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1091 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1092 (count, strip, path))
1092 (count, strip, path))
1093 i += 1
1093 i += 1
1094 # consume '//' in the path
1094 # consume '//' in the path
1095 while i < pathlen - 1 and path[i] == '/':
1095 while i < pathlen - 1 and path[i] == '/':
1096 i += 1
1096 i += 1
1097 count -= 1
1097 count -= 1
1098 return path[:i].lstrip(), path[i:].rstrip()
1098 return path[:i].lstrip(), path[i:].rstrip()
1099
1099
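# Illustrative sketch (not part of patch.py): what pathstrip() computes.
# It skips 'strip' leading path components the way patch(1) -p does and
# returns the stripped prefix plus the remainder; the real function also
# swallows duplicate slashes and raises PatchError when there are not
# enough components.  Simplified stand-in to show the shape of the output:
def _demo_pathstrip(path, strip):
    parts = path.split('/')
    prefix = '/'.join(parts[:strip])
    return (prefix + '/' if prefix else ''), '/'.join(parts[strip:])

print(_demo_pathstrip('a/b/c/d.txt', 0))   # ('', 'a/b/c/d.txt')
print(_demo_pathstrip('a/b/c/d.txt', 1))   # ('a/', 'b/c/d.txt')
print(_demo_pathstrip('a/b/c/d.txt', 2))   # ('a/b/', 'c/d.txt')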
1100 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1100 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1101 nulla = afile_orig == "/dev/null"
1101 nulla = afile_orig == "/dev/null"
1102 nullb = bfile_orig == "/dev/null"
1102 nullb = bfile_orig == "/dev/null"
1103 create = nulla and hunk.starta == 0 and hunk.lena == 0
1103 create = nulla and hunk.starta == 0 and hunk.lena == 0
1104 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1104 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1105 abase, afile = pathstrip(afile_orig, strip)
1105 abase, afile = pathstrip(afile_orig, strip)
1106 gooda = not nulla and backend.exists(afile)
1106 gooda = not nulla and backend.exists(afile)
1107 bbase, bfile = pathstrip(bfile_orig, strip)
1107 bbase, bfile = pathstrip(bfile_orig, strip)
1108 if afile == bfile:
1108 if afile == bfile:
1109 goodb = gooda
1109 goodb = gooda
1110 else:
1110 else:
1111 goodb = not nullb and backend.exists(bfile)
1111 goodb = not nullb and backend.exists(bfile)
1112 missing = not goodb and not gooda and not create
1112 missing = not goodb and not gooda and not create
1113
1113
1114 # some diff programs apparently produce patches where the afile is
1114 # some diff programs apparently produce patches where the afile is
1115 # not /dev/null, but afile starts with bfile
1115 # not /dev/null, but afile starts with bfile
1116 abasedir = afile[:afile.rfind('/') + 1]
1116 abasedir = afile[:afile.rfind('/') + 1]
1117 bbasedir = bfile[:bfile.rfind('/') + 1]
1117 bbasedir = bfile[:bfile.rfind('/') + 1]
1118 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1118 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1119 and hunk.starta == 0 and hunk.lena == 0):
1119 and hunk.starta == 0 and hunk.lena == 0):
1120 create = True
1120 create = True
1121 missing = False
1121 missing = False
1122
1122
1123 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1123 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1124 # diff is between a file and its backup. In this case, the original
1124 # diff is between a file and its backup. In this case, the original
1125 # file should be patched (see original mpatch code).
1125 # file should be patched (see original mpatch code).
1126 isbackup = (abase == bbase and bfile.startswith(afile))
1126 isbackup = (abase == bbase and bfile.startswith(afile))
1127 fname = None
1127 fname = None
1128 if not missing:
1128 if not missing:
1129 if gooda and goodb:
1129 if gooda and goodb:
1130 fname = isbackup and afile or bfile
1130 fname = isbackup and afile or bfile
1131 elif gooda:
1131 elif gooda:
1132 fname = afile
1132 fname = afile
1133
1133
1134 if not fname:
1134 if not fname:
1135 if not nullb:
1135 if not nullb:
1136 fname = isbackup and afile or bfile
1136 fname = isbackup and afile or bfile
1137 elif not nulla:
1137 elif not nulla:
1138 fname = afile
1138 fname = afile
1139 else:
1139 else:
1140 raise PatchError(_("undefined source and destination files"))
1140 raise PatchError(_("undefined source and destination files"))
1141
1141
1142 gp = patchmeta(fname)
1142 gp = patchmeta(fname)
1143 if create:
1143 if create:
1144 gp.op = 'ADD'
1144 gp.op = 'ADD'
1145 elif remove:
1145 elif remove:
1146 gp.op = 'DELETE'
1146 gp.op = 'DELETE'
1147 return gp
1147 return gp
1148
1148
1149 def scangitpatch(lr, firstline):
1149 def scangitpatch(lr, firstline):
1150 """
1150 """
1151 Git patches can emit:
1151 Git patches can emit:
1152 - rename a to b
1152 - rename a to b
1153 - change b
1153 - change b
1154 - copy a to c
1154 - copy a to c
1155 - change c
1155 - change c
1156
1156
1157 We cannot apply this sequence as-is: the renamed 'a' could not be
1157 We cannot apply this sequence as-is: the renamed 'a' could not be
1158 found, since it would have been renamed already. And we cannot copy
1158 found, since it would have been renamed already. And we cannot copy
1159 from 'b' instead because 'b' would have been changed already. So
1159 from 'b' instead because 'b' would have been changed already. So
1160 we scan the git patch for copy and rename commands so we can
1160 we scan the git patch for copy and rename commands so we can
1161 perform the copies ahead of time.
1161 perform the copies ahead of time.
1162 """
1162 """
1163 pos = 0
1163 pos = 0
1164 try:
1164 try:
1165 pos = lr.fp.tell()
1165 pos = lr.fp.tell()
1166 fp = lr.fp
1166 fp = lr.fp
1167 except IOError:
1167 except IOError:
1168 fp = cStringIO.StringIO(lr.fp.read())
1168 fp = cStringIO.StringIO(lr.fp.read())
1169 gitlr = linereader(fp)
1169 gitlr = linereader(fp)
1170 gitlr.push(firstline)
1170 gitlr.push(firstline)
1171 gitpatches = readgitpatch(gitlr)
1171 gitpatches = readgitpatch(gitlr)
1172 fp.seek(pos)
1172 fp.seek(pos)
1173 return gitpatches
1173 return gitpatches
1174
1174
1175 def iterhunks(fp):
1175 def iterhunks(fp):
1176 """Read a patch and yield the following events:
1176 """Read a patch and yield the following events:
1177 - ("file", afile, bfile, firsthunk): select a new target file.
1177 - ("file", afile, bfile, firsthunk): select a new target file.
1178 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1178 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1179 "file" event.
1179 "file" event.
1180 - ("git", gitchanges): current diff is in git format, gitchanges
1180 - ("git", gitchanges): current diff is in git format, gitchanges
1181 maps filenames to gitpatch records. Unique event.
1181 maps filenames to gitpatch records. Unique event.
1182 """
1182 """
1183 afile = ""
1183 afile = ""
1184 bfile = ""
1184 bfile = ""
1185 state = None
1185 state = None
1186 hunknum = 0
1186 hunknum = 0
1187 emitfile = newfile = False
1187 emitfile = newfile = False
1188 gitpatches = None
1188 gitpatches = None
1189
1189
1190 # our states
1190 # our states
1191 BFILE = 1
1191 BFILE = 1
1192 context = None
1192 context = None
1193 lr = linereader(fp)
1193 lr = linereader(fp)
1194
1194
1195 while True:
1195 while True:
1196 x = lr.readline()
1196 x = lr.readline()
1197 if not x:
1197 if not x:
1198 break
1198 break
1199 if state == BFILE and (
1199 if state == BFILE and (
1200 (not context and x[0] == '@')
1200 (not context and x[0] == '@')
1201 or (context is not False and x.startswith('***************'))
1201 or (context is not False and x.startswith('***************'))
1202 or x.startswith('GIT binary patch')):
1202 or x.startswith('GIT binary patch')):
1203 gp = None
1203 gp = None
1204 if (gitpatches and
1204 if (gitpatches and
1205 gitpatches[-1].ispatching(afile, bfile)):
1205 gitpatches[-1].ispatching(afile, bfile)):
1206 gp = gitpatches.pop()
1206 gp = gitpatches.pop()
1207 if x.startswith('GIT binary patch'):
1207 if x.startswith('GIT binary patch'):
1208 h = binhunk(lr, gp.path)
1208 h = binhunk(lr, gp.path)
1209 else:
1209 else:
1210 if context is None and x.startswith('***************'):
1210 if context is None and x.startswith('***************'):
1211 context = True
1211 context = True
1212 h = hunk(x, hunknum + 1, lr, context)
1212 h = hunk(x, hunknum + 1, lr, context)
1213 hunknum += 1
1213 hunknum += 1
1214 if emitfile:
1214 if emitfile:
1215 emitfile = False
1215 emitfile = False
1216 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1216 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1217 yield 'hunk', h
1217 yield 'hunk', h
1218 elif x.startswith('diff --git'):
1218 elif x.startswith('diff --git'):
1219 m = gitre.match(x.rstrip(' \r\n'))
1219 m = gitre.match(x.rstrip(' \r\n'))
1220 if not m:
1220 if not m:
1221 continue
1221 continue
1222 if gitpatches is None:
1222 if gitpatches is None:
1223 # scan whole input for git metadata
1223 # scan whole input for git metadata
1224 gitpatches = scangitpatch(lr, x)
1224 gitpatches = scangitpatch(lr, x)
1225 yield 'git', [g.copy() for g in gitpatches
1225 yield 'git', [g.copy() for g in gitpatches
1226 if g.op in ('COPY', 'RENAME')]
1226 if g.op in ('COPY', 'RENAME')]
1227 gitpatches.reverse()
1227 gitpatches.reverse()
1228 afile = 'a/' + m.group(1)
1228 afile = 'a/' + m.group(1)
1229 bfile = 'b/' + m.group(2)
1229 bfile = 'b/' + m.group(2)
1230 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1230 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1231 gp = gitpatches.pop()
1231 gp = gitpatches.pop()
1232 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1232 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1233 if not gitpatches:
1233 if not gitpatches:
1234 raise PatchError(_('failed to synchronize metadata for "%s"')
1234 raise PatchError(_('failed to synchronize metadata for "%s"')
1235 % afile[2:])
1235 % afile[2:])
1236 gp = gitpatches[-1]
1236 gp = gitpatches[-1]
1237 newfile = True
1237 newfile = True
1238 elif x.startswith('---'):
1238 elif x.startswith('---'):
1239 # check for a unified diff
1239 # check for a unified diff
1240 l2 = lr.readline()
1240 l2 = lr.readline()
1241 if not l2.startswith('+++'):
1241 if not l2.startswith('+++'):
1242 lr.push(l2)
1242 lr.push(l2)
1243 continue
1243 continue
1244 newfile = True
1244 newfile = True
1245 context = False
1245 context = False
1246 afile = parsefilename(x)
1246 afile = parsefilename(x)
1247 bfile = parsefilename(l2)
1247 bfile = parsefilename(l2)
1248 elif x.startswith('***'):
1248 elif x.startswith('***'):
1249 # check for a context diff
1249 # check for a context diff
1250 l2 = lr.readline()
1250 l2 = lr.readline()
1251 if not l2.startswith('---'):
1251 if not l2.startswith('---'):
1252 lr.push(l2)
1252 lr.push(l2)
1253 continue
1253 continue
1254 l3 = lr.readline()
1254 l3 = lr.readline()
1255 lr.push(l3)
1255 lr.push(l3)
1256 if not l3.startswith("***************"):
1256 if not l3.startswith("***************"):
1257 lr.push(l2)
1257 lr.push(l2)
1258 continue
1258 continue
1259 newfile = True
1259 newfile = True
1260 context = True
1260 context = True
1261 afile = parsefilename(x)
1261 afile = parsefilename(x)
1262 bfile = parsefilename(l2)
1262 bfile = parsefilename(l2)
1263
1263
1264 if newfile:
1264 if newfile:
1265 newfile = False
1265 newfile = False
1266 emitfile = True
1266 emitfile = True
1267 state = BFILE
1267 state = BFILE
1268 hunknum = 0
1268 hunknum = 0
1269
1269
1270 while gitpatches:
1270 while gitpatches:
1271 gp = gitpatches.pop()
1271 gp = gitpatches.pop()
1272 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1272 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1273
1273
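# Illustrative sketch (not part of patch.py): driving iterhunks() by hand.
# _applydiff() below is the real consumer; this just prints the event
# stream for a tiny unified diff.  It assumes this module is importable as
# mercurial.patch and runs under Python 2, like the rest of the file.
import cStringIO
from mercurial import patch as patchmod

demo_diff = ('--- a/greeting.txt\n'
             '+++ b/greeting.txt\n'
             '@@ -1,1 +1,1 @@\n'
             '-hello\n'
             '+hello, world\n')
for event, values in patchmod.iterhunks(cStringIO.StringIO(demo_diff)):
    print((event, values))
# expected: one ('file', (afile, bfile, first_hunk, None)) event followed
# by one ('hunk', <hunk>) event; git metadata would add a 'git' event.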
1274 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1274 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1275 """Reads a patch from fp and tries to apply it.
1275 """Reads a patch from fp and tries to apply it.
1276
1276
1277 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1277 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1278 there was any fuzz.
1278 there was any fuzz.
1279
1279
1280 If 'eolmode' is 'strict', the patch content and patched file are
1280 If 'eolmode' is 'strict', the patch content and patched file are
1281 read in binary mode. Otherwise, line endings are ignored when
1281 read in binary mode. Otherwise, line endings are ignored when
1282 patching then normalized according to 'eolmode'.
1282 patching then normalized according to 'eolmode'.
1283 """
1283 """
1284 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1284 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1285 eolmode=eolmode)
1285 eolmode=eolmode)
1286
1286
1287 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1287 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1288 eolmode='strict'):
1288 eolmode='strict'):
1289
1289
1290 def pstrip(p):
1290 def pstrip(p):
1291 return pathstrip(p, strip - 1)[1]
1291 return pathstrip(p, strip - 1)[1]
1292
1292
1293 rejects = 0
1293 rejects = 0
1294 err = 0
1294 err = 0
1295 current_file = None
1295 current_file = None
1296
1296
1297 for state, values in iterhunks(fp):
1297 for state, values in iterhunks(fp):
1298 if state == 'hunk':
1298 if state == 'hunk':
1299 if not current_file:
1299 if not current_file:
1300 continue
1300 continue
1301 ret = current_file.apply(values)
1301 ret = current_file.apply(values)
1302 if ret > 0:
1302 if ret > 0:
1303 err = 1
1303 err = 1
1304 elif state == 'file':
1304 elif state == 'file':
1305 if current_file:
1305 if current_file:
1306 rejects += current_file.close()
1306 rejects += current_file.close()
1307 current_file = None
1307 current_file = None
1308 afile, bfile, first_hunk, gp = values
1308 afile, bfile, first_hunk, gp = values
1309 if gp:
1309 if gp:
1310 gp.path = pstrip(gp.path)
1310 gp.path = pstrip(gp.path)
1311 if gp.oldpath:
1311 if gp.oldpath:
1312 gp.oldpath = pstrip(gp.oldpath)
1312 gp.oldpath = pstrip(gp.oldpath)
1313 else:
1313 else:
1314 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1314 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1315 if gp.op == 'RENAME':
1315 if gp.op == 'RENAME':
1316 backend.unlink(gp.oldpath)
1316 backend.unlink(gp.oldpath)
1317 if not first_hunk:
1317 if not first_hunk:
1318 if gp.op == 'DELETE':
1318 if gp.op == 'DELETE':
1319 backend.unlink(gp.path)
1319 backend.unlink(gp.path)
1320 continue
1320 continue
1321 data, mode = None, None
1321 data, mode = None, None
1322 if gp.op in ('RENAME', 'COPY'):
1322 if gp.op in ('RENAME', 'COPY'):
1323 data, mode = store.getfile(gp.oldpath)[:2]
1323 data, mode = store.getfile(gp.oldpath)[:2]
1324 if gp.mode:
1324 if gp.mode:
1325 mode = gp.mode
1325 mode = gp.mode
1326 if gp.op == 'ADD':
1326 if gp.op == 'ADD':
1327 # Added files without content have no hunk and
1327 # Added files without content have no hunk and
1328 # must be created
1328 # must be created
1329 data = ''
1329 data = ''
1330 if data or mode:
1330 if data or mode:
1331 if (gp.op in ('ADD', 'RENAME', 'COPY')
1331 if (gp.op in ('ADD', 'RENAME', 'COPY')
1332 and backend.exists(gp.path)):
1332 and backend.exists(gp.path)):
1333 raise PatchError(_("cannot create %s: destination "
1333 raise PatchError(_("cannot create %s: destination "
1334 "already exists") % gp.path)
1334 "already exists") % gp.path)
1335 backend.setfile(gp.path, data, mode, gp.oldpath)
1335 backend.setfile(gp.path, data, mode, gp.oldpath)
1336 continue
1336 continue
1337 try:
1337 try:
1338 current_file = patcher(ui, gp, backend, store,
1338 current_file = patcher(ui, gp, backend, store,
1339 eolmode=eolmode)
1339 eolmode=eolmode)
1340 except PatchError, inst:
1340 except PatchError, inst:
1341 ui.warn(str(inst) + '\n')
1341 ui.warn(str(inst) + '\n')
1342 current_file = None
1342 current_file = None
1343 rejects += 1
1343 rejects += 1
1344 continue
1344 continue
1345 elif state == 'git':
1345 elif state == 'git':
1346 for gp in values:
1346 for gp in values:
1347 path = pstrip(gp.oldpath)
1347 path = pstrip(gp.oldpath)
1348 try:
1348 try:
1349 data, mode = backend.getfile(path)
1349 data, mode = backend.getfile(path)
1350 except IOError, e:
1350 except IOError, e:
1351 if e.errno != errno.ENOENT:
1351 if e.errno != errno.ENOENT:
1352 raise
1352 raise
1353 # The error ignored here will trigger a getfile()
1353 # The error ignored here will trigger a getfile()
1354 # error in a place more appropriate for error
1354 # error in a place more appropriate for error
1355 # handling, and will not interrupt the patching
1355 # handling, and will not interrupt the patching
1356 # process.
1356 # process.
1357 else:
1357 else:
1358 store.setfile(path, data, mode)
1358 store.setfile(path, data, mode)
1359 else:
1359 else:
1360 raise util.Abort(_('unsupported parser state: %s') % state)
1360 raise util.Abort(_('unsupported parser state: %s') % state)
1361
1361
1362 if current_file:
1362 if current_file:
1363 rejects += current_file.close()
1363 rejects += current_file.close()
1364
1364
1365 if rejects:
1365 if rejects:
1366 return -1
1366 return -1
1367 return err
1367 return err
1368
1368
1369 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1369 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1370 similarity):
1370 similarity):
1371 """use <patcher> to apply <patchname> to the working directory.
1371 """use <patcher> to apply <patchname> to the working directory.
1372 returns whether patch was applied with fuzz factor."""
1372 returns whether patch was applied with fuzz factor."""
1373
1373
1374 fuzz = False
1374 fuzz = False
1375 args = []
1375 args = []
1376 cwd = repo.root
1376 cwd = repo.root
1377 if cwd:
1377 if cwd:
1378 args.append('-d %s' % util.shellquote(cwd))
1378 args.append('-d %s' % util.shellquote(cwd))
1379 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1379 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1380 util.shellquote(patchname)))
1380 util.shellquote(patchname)))
1381 try:
1381 try:
1382 for line in fp:
1382 for line in fp:
1383 line = line.rstrip()
1383 line = line.rstrip()
1384 ui.note(line + '\n')
1384 ui.note(line + '\n')
1385 if line.startswith('patching file '):
1385 if line.startswith('patching file '):
1386 pf = util.parsepatchoutput(line)
1386 pf = util.parsepatchoutput(line)
1387 printed_file = False
1387 printed_file = False
1388 files.add(pf)
1388 files.add(pf)
1389 elif line.find('with fuzz') >= 0:
1389 elif line.find('with fuzz') >= 0:
1390 fuzz = True
1390 fuzz = True
1391 if not printed_file:
1391 if not printed_file:
1392 ui.warn(pf + '\n')
1392 ui.warn(pf + '\n')
1393 printed_file = True
1393 printed_file = True
1394 ui.warn(line + '\n')
1394 ui.warn(line + '\n')
1395 elif line.find('saving rejects to file') >= 0:
1395 elif line.find('saving rejects to file') >= 0:
1396 ui.warn(line + '\n')
1396 ui.warn(line + '\n')
1397 elif line.find('FAILED') >= 0:
1397 elif line.find('FAILED') >= 0:
1398 if not printed_file:
1398 if not printed_file:
1399 ui.warn(pf + '\n')
1399 ui.warn(pf + '\n')
1400 printed_file = True
1400 printed_file = True
1401 ui.warn(line + '\n')
1401 ui.warn(line + '\n')
1402 finally:
1402 finally:
1403 if files:
1403 if files:
1404 cfiles = list(files)
1404 cfiles = list(files)
1405 cwd = repo.getcwd()
1405 cwd = repo.getcwd()
1406 if cwd:
1406 if cwd:
1407 cfiles = [util.pathto(repo.root, cwd, f)
1407 cfiles = [util.pathto(repo.root, cwd, f)
1408 for f in cfiles]
1408 for f in cfiles]
1409 scmutil.addremove(repo, cfiles, similarity=similarity)
1409 scmutil.addremove(repo, cfiles, similarity=similarity)
1410 code = fp.close()
1410 code = fp.close()
1411 if code:
1411 if code:
1412 raise PatchError(_("patch command failed: %s") %
1412 raise PatchError(_("patch command failed: %s") %
1413 util.explainexit(code)[0])
1413 util.explainexit(code)[0])
1414 return fuzz
1414 return fuzz
1415
1415
1416 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1416 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1417 if files is None:
1417 if files is None:
1418 files = set()
1418 files = set()
1419 if eolmode is None:
1419 if eolmode is None:
1420 eolmode = ui.config('patch', 'eol', 'strict')
1420 eolmode = ui.config('patch', 'eol', 'strict')
1421 if eolmode.lower() not in eolmodes:
1421 if eolmode.lower() not in eolmodes:
1422 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1422 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1423 eolmode = eolmode.lower()
1423 eolmode = eolmode.lower()
1424
1424
1425 store = filestore()
1425 store = filestore()
1426 try:
1426 try:
1427 fp = open(patchobj, 'rb')
1427 fp = open(patchobj, 'rb')
1428 except TypeError:
1428 except TypeError:
1429 fp = patchobj
1429 fp = patchobj
1430 try:
1430 try:
1431 ret = applydiff(ui, fp, backend, store, strip=strip,
1431 ret = applydiff(ui, fp, backend, store, strip=strip,
1432 eolmode=eolmode)
1432 eolmode=eolmode)
1433 finally:
1433 finally:
1434 if fp != patchobj:
1434 if fp != patchobj:
1435 fp.close()
1435 fp.close()
1436 files.update(backend.close())
1436 files.update(backend.close())
1437 store.close()
1437 store.close()
1438 if ret < 0:
1438 if ret < 0:
1439 raise PatchError(_('patch failed to apply'))
1439 raise PatchError(_('patch failed to apply'))
1440 return ret > 0
1440 return ret > 0
1441
1441
1442 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1442 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1443 similarity=0):
1443 similarity=0):
1444 """use builtin patch to apply <patchobj> to the working directory.
1444 """use builtin patch to apply <patchobj> to the working directory.
1445 returns whether patch was applied with fuzz factor."""
1445 returns whether patch was applied with fuzz factor."""
1446 backend = workingbackend(ui, repo, similarity)
1446 backend = workingbackend(ui, repo, similarity)
1447 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1447 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1448
1448
1449 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1449 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1450 eolmode='strict'):
1450 eolmode='strict'):
1451 backend = repobackend(ui, repo, ctx, store)
1451 backend = repobackend(ui, repo, ctx, store)
1452 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1452 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1453
1453
1454 def makememctx(repo, parents, text, user, date, branch, files, store,
1454 def makememctx(repo, parents, text, user, date, branch, files, store,
1455 editor=None):
1455 editor=None):
1456 def getfilectx(repo, memctx, path):
1456 def getfilectx(repo, memctx, path):
1457 data, (islink, isexec), copied = store.getfile(path)
1457 data, (islink, isexec), copied = store.getfile(path)
1458 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1458 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1459 copied=copied)
1459 copied=copied)
1460 extra = {}
1460 extra = {}
1461 if branch:
1461 if branch:
1462 extra['branch'] = encoding.fromlocal(branch)
1462 extra['branch'] = encoding.fromlocal(branch)
1463 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1463 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1464 date, extra)
1464 date, extra)
1465 if editor:
1465 if editor:
1466 ctx._text = editor(repo, ctx, [])
1466 ctx._text = editor(repo, ctx, [])
1467 return ctx
1467 return ctx
1468
1468
1469 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1469 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1470 similarity=0):
1470 similarity=0):
1471 """Apply <patchname> to the working directory.
1471 """Apply <patchname> to the working directory.
1472
1472
1473 'eolmode' specifies how end of lines should be handled. It can be:
1473 'eolmode' specifies how end of lines should be handled. It can be:
1474 - 'strict': inputs are read in binary mode, EOLs are preserved
1474 - 'strict': inputs are read in binary mode, EOLs are preserved
1475 - 'crlf': EOLs are ignored when patching and reset to CRLF
1475 - 'crlf': EOLs are ignored when patching and reset to CRLF
1476 - 'lf': EOLs are ignored when patching and reset to LF
1476 - 'lf': EOLs are ignored when patching and reset to LF
1477 - None: get it from user settings, default to 'strict'
1477 - None: get it from user settings, default to 'strict'
1478 'eolmode' is ignored when using an external patcher program.
1478 'eolmode' is ignored when using an external patcher program.
1479
1479
1480 Returns whether patch was applied with fuzz factor.
1480 Returns whether patch was applied with fuzz factor.
1481 """
1481 """
1482 patcher = ui.config('ui', 'patch')
1482 patcher = ui.config('ui', 'patch')
1483 if files is None:
1483 if files is None:
1484 files = set()
1484 files = set()
1485 try:
1485 try:
1486 if patcher:
1486 if patcher:
1487 return _externalpatch(ui, repo, patcher, patchname, strip,
1487 return _externalpatch(ui, repo, patcher, patchname, strip,
1488 files, similarity)
1488 files, similarity)
1489 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1489 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1490 similarity)
1490 similarity)
1491 except PatchError, err:
1491 except PatchError, err:
1492 raise util.Abort(str(err))
1492 raise util.Abort(str(err))
1493
1493
1494 def changedfiles(ui, repo, patchpath, strip=1):
1494 def changedfiles(ui, repo, patchpath, strip=1):
1495 backend = fsbackend(ui, repo.root)
1495 backend = fsbackend(ui, repo.root)
1496 fp = open(patchpath, 'rb')
1496 fp = open(patchpath, 'rb')
1497 try:
1497 try:
1498 changed = set()
1498 changed = set()
1499 for state, values in iterhunks(fp):
1499 for state, values in iterhunks(fp):
1500 if state == 'file':
1500 if state == 'file':
1501 afile, bfile, first_hunk, gp = values
1501 afile, bfile, first_hunk, gp = values
1502 if gp:
1502 if gp:
1503 gp.path = pathstrip(gp.path, strip - 1)[1]
1503 gp.path = pathstrip(gp.path, strip - 1)[1]
1504 if gp.oldpath:
1504 if gp.oldpath:
1505 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1505 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1506 else:
1506 else:
1507 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1507 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1508 changed.add(gp.path)
1508 changed.add(gp.path)
1509 if gp.op == 'RENAME':
1509 if gp.op == 'RENAME':
1510 changed.add(gp.oldpath)
1510 changed.add(gp.oldpath)
1511 elif state not in ('hunk', 'git'):
1511 elif state not in ('hunk', 'git'):
1512 raise util.Abort(_('unsupported parser state: %s') % state)
1512 raise util.Abort(_('unsupported parser state: %s') % state)
1513 return changed
1513 return changed
1514 finally:
1514 finally:
1515 fp.close()
1515 fp.close()
1516
1516
1517 class GitDiffRequired(Exception):
1517 class GitDiffRequired(Exception):
1518 pass
1518 pass
1519
1519
1520 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1520 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1521 def get(key, name=None, getter=ui.configbool):
1521 def get(key, name=None, getter=ui.configbool):
1522 return ((opts and opts.get(key)) or
1522 return ((opts and opts.get(key)) or
1523 getter(section, name or key, None, untrusted=untrusted))
1523 getter(section, name or key, None, untrusted=untrusted))
1524 return mdiff.diffopts(
1524 return mdiff.diffopts(
1525 text=opts and opts.get('text'),
1525 text=opts and opts.get('text'),
1526 git=get('git'),
1526 git=get('git'),
1527 nodates=get('nodates'),
1527 nodates=get('nodates'),
1528 showfunc=get('show_function', 'showfunc'),
1528 showfunc=get('show_function', 'showfunc'),
1529 ignorews=get('ignore_all_space', 'ignorews'),
1529 ignorews=get('ignore_all_space', 'ignorews'),
1530 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1530 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1531 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1531 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1532 context=get('unified', getter=ui.config))
1532 context=get('unified', getter=ui.config))
1533
1533
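# Illustrative sketch (not part of patch.py): the precedence implemented by
# the get() helper above.  A truthy command-line opt (keyed by its opt
# spelling, e.g. 'show_function') wins; anything falsy falls back to the
# [diff] config value under its config-file spelling (e.g. 'showfunc').
# Toy stand-ins for opts and ui config:
def _demo_get(opts, config, key, name=None):
    return (opts and opts.get(key)) or config.get(name or key)

config = {'showfunc': True, 'git': False}
print(_demo_get({'show_function': None}, config, 'show_function', 'showfunc'))  # True
print(_demo_get({'git': True}, config, 'git'))                                   # True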
1534 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1534 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1535 losedatafn=None, prefix=''):
1535 losedatafn=None, prefix=''):
1536 '''yields diff of changes to files between two nodes, or node and
1536 '''yields diff of changes to files between two nodes, or node and
1537 working directory.
1537 working directory.
1538
1538
1539 if node1 is None, use first dirstate parent instead.
1539 if node1 is None, use first dirstate parent instead.
1540 if node2 is None, compare node1 with working directory.
1540 if node2 is None, compare node1 with working directory.
1541
1541
1542 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
1542 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
1543 every time some change cannot be represented with the current
1543 every time some change cannot be represented with the current
1544 patch format. Return False to upgrade to git patch format, True to
1544 patch format. Return False to upgrade to git patch format, True to
1545 accept the loss or raise an exception to abort the diff. It is
1545 accept the loss or raise an exception to abort the diff. It is
1546 called with the name of current file being diffed as 'fn'. If set
1546 called with the name of current file being diffed as 'fn'. If set
1547 to None, patches will always be upgraded to git format when
1547 to None, patches will always be upgraded to git format when
1548 necessary.
1548 necessary.
1549
1549
1550 prefix is a filename prefix that is prepended to all filenames on
1550 prefix is a filename prefix that is prepended to all filenames on
1551 display (used for subrepos).
1551 display (used for subrepos).
1552 '''
1552 '''
1553
1553
1554 if opts is None:
1554 if opts is None:
1555 opts = mdiff.defaultopts
1555 opts = mdiff.defaultopts
1556
1556
1557 if not node1 and not node2:
1557 if not node1 and not node2:
1558 node1 = repo.dirstate.p1()
1558 node1 = repo.dirstate.p1()
1559
1559
1560 def lrugetfilectx():
1560 def lrugetfilectx():
1561 cache = {}
1561 cache = {}
1562 order = util.deque()
1562 order = util.deque()
1563 def getfilectx(f, ctx):
1563 def getfilectx(f, ctx):
1564 fctx = ctx.filectx(f, filelog=cache.get(f))
1564 fctx = ctx.filectx(f, filelog=cache.get(f))
1565 if f not in cache:
1565 if f not in cache:
1566 if len(cache) > 20:
1566 if len(cache) > 20:
1567 del cache[order.popleft()]
1567 del cache[order.popleft()]
1568 cache[f] = fctx.filelog()
1568 cache[f] = fctx.filelog()
1569 else:
1569 else:
1570 order.remove(f)
1570 order.remove(f)
1571 order.append(f)
1571 order.append(f)
1572 return fctx
1572 return fctx
1573 return getfilectx
1573 return getfilectx
1574 getfilectx = lrugetfilectx()
1574 getfilectx = lrugetfilectx()
1575
1575
1576 ctx1 = repo[node1]
1576 ctx1 = repo[node1]
1577 ctx2 = repo[node2]
1577 ctx2 = repo[node2]
1578
1578
1579 if not changes:
1579 if not changes:
1580 changes = repo.status(ctx1, ctx2, match=match)
1580 changes = repo.status(ctx1, ctx2, match=match)
1581 modified, added, removed = changes[:3]
1581 modified, added, removed = changes[:3]
1582
1582
1583 if not modified and not added and not removed:
1583 if not modified and not added and not removed:
1584 return []
1584 return []
1585
1585
1586 revs = None
1586 revs = None
1587 if not repo.ui.quiet:
1587 if not repo.ui.quiet:
1588 hexfunc = repo.ui.debugflag and hex or short
1588 hexfunc = repo.ui.debugflag and hex or short
1589 revs = [hexfunc(node) for node in [node1, node2] if node]
1589 revs = [hexfunc(node) for node in [node1, node2] if node]
1590
1590
1591 copy = {}
1591 copy = {}
1592 if opts.git or opts.upgrade:
1592 if opts.git or opts.upgrade:
1593 copy = copies.pathcopies(ctx1, ctx2)
1593 copy = copies.pathcopies(ctx1, ctx2)
1594
1594
1595 def difffn(opts, losedata):
1595 def difffn(opts, losedata):
1596 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1596 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1597 copy, getfilectx, opts, losedata, prefix)
1597 copy, getfilectx, opts, losedata, prefix)
1598 if opts.upgrade and not opts.git:
1598 if opts.upgrade and not opts.git:
1599 try:
1599 try:
1600 def losedata(fn):
1600 def losedata(fn):
1601 if not losedatafn or not losedatafn(fn=fn):
1601 if not losedatafn or not losedatafn(fn=fn):
1602 raise GitDiffRequired
1602 raise GitDiffRequired
1603 # Buffer the whole output until we are sure it can be generated
1603 # Buffer the whole output until we are sure it can be generated
1604 return list(difffn(opts.copy(git=False), losedata))
1604 return list(difffn(opts.copy(git=False), losedata))
1605 except GitDiffRequired:
1605 except GitDiffRequired:
1606 return difffn(opts.copy(git=True), None)
1606 return difffn(opts.copy(git=True), None)
1607 else:
1607 else:
1608 return difffn(opts, None)
1608 return difffn(opts, None)
1609
1609
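For orientation, a minimal sketch of how a caller might drive diff() with the losedatafn hook described in the docstring above. The extension-style wrapper, the printdiff name, and the ui/repo objects are illustrative assumptions, not part of this change.

# Hypothetical caller (sketch only): ask diff() to upgrade to the git
# format whenever the plain format would lose information (renames,
# mode changes, binaries, empty files).
from mercurial import mdiff, patch

def printdiff(ui, repo, node1=None, node2=None):
    opts = mdiff.diffopts(git=False, upgrade=True, context=3)

    def losedata(fn=None):
        # Per the docstring: return False to upgrade to the git format,
        # True to accept the loss for file 'fn'.
        ui.note('upgrading to git diff for %s\n' % fn)
        return False

    for chunk in patch.diff(repo, node1, node2, opts=opts,
                            losedatafn=losedata):
        ui.write(chunk)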
1610 def difflabel(func, *args, **kw):
1610 def difflabel(func, *args, **kw):
1611 '''yields 2-tuples of (output, label) based on the output of func()'''
1611 '''yields 2-tuples of (output, label) based on the output of func()'''
1612 headprefixes = [('diff', 'diff.diffline'),
1612 headprefixes = [('diff', 'diff.diffline'),
1613 ('copy', 'diff.extended'),
1613 ('copy', 'diff.extended'),
1614 ('rename', 'diff.extended'),
1614 ('rename', 'diff.extended'),
1615 ('old', 'diff.extended'),
1615 ('old', 'diff.extended'),
1616 ('new', 'diff.extended'),
1616 ('new', 'diff.extended'),
1617 ('deleted', 'diff.extended'),
1617 ('deleted', 'diff.extended'),
1618 ('---', 'diff.file_a'),
1618 ('---', 'diff.file_a'),
1619 ('+++', 'diff.file_b')]
1619 ('+++', 'diff.file_b')]
1620 textprefixes = [('@', 'diff.hunk'),
1620 textprefixes = [('@', 'diff.hunk'),
1621 ('-', 'diff.deleted'),
1621 ('-', 'diff.deleted'),
1622 ('+', 'diff.inserted')]
1622 ('+', 'diff.inserted')]
1623 head = False
1623 head = False
1624 for chunk in func(*args, **kw):
1624 for chunk in func(*args, **kw):
1625 lines = chunk.split('\n')
1625 lines = chunk.split('\n')
1626 for i, line in enumerate(lines):
1626 for i, line in enumerate(lines):
1627 if i != 0:
1627 if i != 0:
1628 yield ('\n', '')
1628 yield ('\n', '')
1629 if head:
1629 if head:
1630 if line.startswith('@'):
1630 if line.startswith('@'):
1631 head = False
1631 head = False
1632 else:
1632 else:
1633 if line and line[0] not in ' +-@\\':
1633 if line and line[0] not in ' +-@\\':
1634 head = True
1634 head = True
1635 stripline = line
1635 stripline = line
1636 if not head and line and line[0] in '+-':
1636 if not head and line and line[0] in '+-':
1637 # highlight trailing whitespace, but only in changed lines
1637 # highlight trailing whitespace, but only in changed lines
1638 stripline = line.rstrip()
1638 stripline = line.rstrip()
1639 prefixes = textprefixes
1639 prefixes = textprefixes
1640 if head:
1640 if head:
1641 prefixes = headprefixes
1641 prefixes = headprefixes
1642 for prefix, label in prefixes:
1642 for prefix, label in prefixes:
1643 if stripline.startswith(prefix):
1643 if stripline.startswith(prefix):
1644 yield (stripline, label)
1644 yield (stripline, label)
1645 break
1645 break
1646 else:
1646 else:
1647 yield (line, '')
1647 yield (line, '')
1648 if line != stripline:
1648 if line != stripline:
1649 yield (line[len(stripline):], 'diff.trailingwhitespace')
1649 yield (line[len(stripline):], 'diff.trailingwhitespace')
1650
1650
1651 def diffui(*args, **kw):
1651 def diffui(*args, **kw):
1652 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1652 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1653 return difflabel(diff, *args, **kw)
1653 return difflabel(diff, *args, **kw)
1654
1654
1655
1655
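difflabel() wraps any chunk-yielding diff function and tags each output line with a colour label; diffui() is the ready-made wrapper around diff(). A short usage sketch, assuming the usual ui/repo objects (the showcolordiff name is made up):

# Sketch only: the (text, label) pairs can be passed straight to
# ui.write(); with the color extension loaded, labels such as
# diff.inserted / diff.deleted / diff.hunk select the colours.
from mercurial import patch

def showcolordiff(ui, repo, node1=None, node2=None):
    for text, label in patch.diffui(repo, node1, node2):
        ui.write(text, label=label)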
1656 def _addmodehdr(header, omode, nmode):
1656 def _addmodehdr(header, omode, nmode):
1657 if omode != nmode:
1657 if omode != nmode:
1658 header.append('old mode %s\n' % omode)
1658 header.append('old mode %s\n' % omode)
1659 header.append('new mode %s\n' % nmode)
1659 header.append('new mode %s\n' % nmode)
1660
1660
1661 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1661 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1662 copy, getfilectx, opts, losedatafn, prefix):
1662 copy, getfilectx, opts, losedatafn, prefix):
1663
1663
1664 def join(f):
1664 def join(f):
1665 return os.path.join(prefix, f)
1665 return os.path.join(prefix, f)
1666
1666
1667 date1 = util.datestr(ctx1.date())
1667 date1 = util.datestr(ctx1.date())
1668 man1 = ctx1.manifest()
1668 man1 = ctx1.manifest()
1669
1669
1670 gone = set()
1670 gone = set()
1671 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1671 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1672
1672
1673 copyto = dict([(v, k) for k, v in copy.items()])
1673 copyto = dict([(v, k) for k, v in copy.items()])
1674
1674
1675 if opts.git:
1675 if opts.git:
1676 revs = None
1676 revs = None
1677
1677
1678 for f in sorted(modified + added + removed):
1678 for f in sorted(modified + added + removed):
1679 to = None
1679 to = None
1680 tn = None
1680 tn = None
1681 dodiff = True
1681 dodiff = True
1682 header = []
1682 header = []
1683 if f in man1:
1683 if f in man1:
1684 to = getfilectx(f, ctx1).data()
1684 to = getfilectx(f, ctx1).data()
1685 if f not in removed:
1685 if f not in removed:
1686 tn = getfilectx(f, ctx2).data()
1686 tn = getfilectx(f, ctx2).data()
1687 a, b = f, f
1687 a, b = f, f
1688 if opts.git or losedatafn:
1688 if opts.git or losedatafn:
1689 if f in added:
1689 if f in added:
1690 mode = gitmode[ctx2.flags(f)]
1690 mode = gitmode[ctx2.flags(f)]
1691 if f in copy or f in copyto:
1691 if f in copy or f in copyto:
1692 if opts.git:
1692 if opts.git:
1693 if f in copy:
1693 if f in copy:
1694 a = copy[f]
1694 a = copy[f]
1695 else:
1695 else:
1696 a = copyto[f]
1696 a = copyto[f]
1697 omode = gitmode[man1.flags(a)]
1697 omode = gitmode[man1.flags(a)]
1698 _addmodehdr(header, omode, mode)
1698 _addmodehdr(header, omode, mode)
1699 if a in removed and a not in gone:
1699 if a in removed and a not in gone:
1700 op = 'rename'
1700 op = 'rename'
1701 gone.add(a)
1701 gone.add(a)
1702 else:
1702 else:
1703 op = 'copy'
1703 op = 'copy'
1704 header.append('%s from %s\n' % (op, join(a)))
1704 header.append('%s from %s\n' % (op, join(a)))
1705 header.append('%s to %s\n' % (op, join(f)))
1705 header.append('%s to %s\n' % (op, join(f)))
1706 to = getfilectx(a, ctx1).data()
1706 to = getfilectx(a, ctx1).data()
1707 else:
1707 else:
1708 losedatafn(f)
1708 losedatafn(f)
1709 else:
1709 else:
1710 if opts.git:
1710 if opts.git:
1711 header.append('new file mode %s\n' % mode)
1711 header.append('new file mode %s\n' % mode)
1712 elif ctx2.flags(f):
1712 elif ctx2.flags(f):
1713 losedatafn(f)
1713 losedatafn(f)
1714 # In theory, if tn was copied or renamed we should check
1714 # In theory, if tn was copied or renamed we should check
1715 # if the source is binary too but the copy record already
1715 # if the source is binary too but the copy record already
1716 # forces git mode.
1716 # forces git mode.
1717 if util.binary(tn):
1717 if util.binary(tn):
1718 if opts.git:
1718 if opts.git:
1719 dodiff = 'binary'
1719 dodiff = 'binary'
1720 else:
1720 else:
1721 losedatafn(f)
1721 losedatafn(f)
1722 if not opts.git and not tn:
1722 if not opts.git and not tn:
1723 # regular diffs cannot represent new empty file
1723 # regular diffs cannot represent new empty file
1724 losedatafn(f)
1724 losedatafn(f)
1725 elif f in removed:
1725 elif f in removed:
1726 if opts.git:
1726 if opts.git:
1727 # have we already reported a copy above?
1727 # have we already reported a copy above?
1728 if ((f in copy and copy[f] in added
1728 if ((f in copy and copy[f] in added
1729 and copyto[copy[f]] == f) or
1729 and copyto[copy[f]] == f) or
1730 (f in copyto and copyto[f] in added
1730 (f in copyto and copyto[f] in added
1731 and copy[copyto[f]] == f)):
1731 and copy[copyto[f]] == f)):
1732 dodiff = False
1732 dodiff = False
1733 else:
1733 else:
1734 header.append('deleted file mode %s\n' %
1734 header.append('deleted file mode %s\n' %
1735 gitmode[man1.flags(f)])
1735 gitmode[man1.flags(f)])
1736 elif not to or util.binary(to):
1736 elif not to or util.binary(to):
1737 # regular diffs cannot represent empty file deletion
1737 # regular diffs cannot represent empty file deletion
1738 losedatafn(f)
1738 losedatafn(f)
1739 else:
1739 else:
1740 oflag = man1.flags(f)
1740 oflag = man1.flags(f)
1741 nflag = ctx2.flags(f)
1741 nflag = ctx2.flags(f)
1742 binary = util.binary(to) or util.binary(tn)
1742 binary = util.binary(to) or util.binary(tn)
1743 if opts.git:
1743 if opts.git:
1744 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1744 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1745 if binary:
1745 if binary:
1746 dodiff = 'binary'
1746 dodiff = 'binary'
1747 elif binary or nflag != oflag:
1747 elif binary or nflag != oflag:
1748 losedatafn(f)
1748 losedatafn(f)
1749 if opts.git:
1750 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1751
1749
1752 if dodiff:
1750 if dodiff:
1751 if opts.git or revs:
1752 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1753 if dodiff == 'binary':
1753 if dodiff == 'binary':
1754 text = mdiff.b85diff(to, tn)
1754 text = mdiff.b85diff(to, tn)
1755 else:
1755 else:
1756 text = mdiff.unidiff(to, date1,
1756 text = mdiff.unidiff(to, date1,
1757 # ctx2 date may be dynamic
1757 # ctx2 date may be dynamic
1758 tn, util.datestr(ctx2.date()),
1758 tn, util.datestr(ctx2.date()),
1759 join(a), join(b), revs, opts=opts)
1759 join(a), join(b), opts=opts)
1760 if header and (text or len(header) > 1):
1760 if header and (text or len(header) > 1):
1761 yield ''.join(header)
1761 yield ''.join(header)
1762 if text:
1762 if text:
1763 yield text
1763 yield text
1764
1764
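With the change above, trydiff() now builds the leading "diff ..." line in a single place: whenever git mode is enabled or revision identifiers are available, mdiff.diffline() is inserted at the top of the per-file header. A hedged illustration of the two header flavours follows; the hashes and file names are made up, and the expected strings are recalled from hg's usual output rather than taken from this changeset.

# Illustration only, not part of the changeset.
from mercurial import mdiff

plainopts = mdiff.diffopts(git=False)
header = mdiff.diffline(['1e2f3a4b5c6d'], 'foo.c', 'foo.c', plainopts)
# expected, roughly: 'diff -r 1e2f3a4b5c6d foo.c\n'

gitopts = mdiff.diffopts(git=True)
githeader = mdiff.diffline(None, 'foo.c', 'foo.c', gitopts)
# expected, roughly: 'diff --git a/foo.c b/foo.c\n'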
1765 def diffstatsum(stats):
1765 def diffstatsum(stats):
1766 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1766 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1767 for f, a, r, b in stats:
1767 for f, a, r, b in stats:
1768 maxfile = max(maxfile, encoding.colwidth(f))
1768 maxfile = max(maxfile, encoding.colwidth(f))
1769 maxtotal = max(maxtotal, a + r)
1769 maxtotal = max(maxtotal, a + r)
1770 addtotal += a
1770 addtotal += a
1771 removetotal += r
1771 removetotal += r
1772 binary = binary or b
1772 binary = binary or b
1773
1773
1774 return maxfile, maxtotal, addtotal, removetotal, binary
1774 return maxfile, maxtotal, addtotal, removetotal, binary
1775
1775
1776 def diffstatdata(lines):
1776 def diffstatdata(lines):
1777 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1777 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1778
1778
1779 results = []
1779 results = []
1780 filename, adds, removes, isbinary = None, 0, 0, False
1780 filename, adds, removes, isbinary = None, 0, 0, False
1781
1781
1782 def addresult():
1782 def addresult():
1783 if filename:
1783 if filename:
1784 results.append((filename, adds, removes, isbinary))
1784 results.append((filename, adds, removes, isbinary))
1785
1785
1786 for line in lines:
1786 for line in lines:
1787 if line.startswith('diff'):
1787 if line.startswith('diff'):
1788 addresult()
1788 addresult()
1789 # set numbers to 0 anyway when starting new file
1789 # set numbers to 0 anyway when starting new file
1790 adds, removes, isbinary = 0, 0, False
1790 adds, removes, isbinary = 0, 0, False
1791 if line.startswith('diff --git'):
1791 if line.startswith('diff --git'):
1792 filename = gitre.search(line).group(1)
1792 filename = gitre.search(line).group(1)
1793 elif line.startswith('diff -r'):
1793 elif line.startswith('diff -r'):
1794 # format: "diff -r ... -r ... filename"
1794 # format: "diff -r ... -r ... filename"
1795 filename = diffre.search(line).group(1)
1795 filename = diffre.search(line).group(1)
1796 elif line.startswith('+') and not line.startswith('+++ '):
1796 elif line.startswith('+') and not line.startswith('+++ '):
1797 adds += 1
1797 adds += 1
1798 elif line.startswith('-') and not line.startswith('--- '):
1798 elif line.startswith('-') and not line.startswith('--- '):
1799 removes += 1
1799 removes += 1
1800 elif (line.startswith('GIT binary patch') or
1800 elif (line.startswith('GIT binary patch') or
1801 line.startswith('Binary file')):
1801 line.startswith('Binary file')):
1802 isbinary = True
1802 isbinary = True
1803 addresult()
1803 addresult()
1804 return results
1804 return results
1805
1805
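diffstatdata() scans raw diff lines and accumulates one (filename, adds, removes, isbinary) tuple per file; diffstatsum() then folds those tuples into the totals used for layout. A small self-contained sketch with a hand-written diff (the file name and hash are made up):

# Sketch only: parse a tiny fake diff and aggregate its stats.
from mercurial import patch

lines = [
    'diff -r 1e2f3a4b5c6d foo.c',
    '--- a/foo.c',
    '+++ b/foo.c',
    '@@ -1,2 +1,3 @@',
    '-old line',
    '+new line',
    '+another new line',
]
stats = patch.diffstatdata(lines)
# stats == [('foo.c', 2, 1, False)]
maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
# maxtotal == 3, adds == 2, removes == 1, binary == False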
1806 def diffstat(lines, width=80, git=False):
1806 def diffstat(lines, width=80, git=False):
1807 output = []
1807 output = []
1808 stats = diffstatdata(lines)
1808 stats = diffstatdata(lines)
1809 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1809 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1810
1810
1811 countwidth = len(str(maxtotal))
1811 countwidth = len(str(maxtotal))
1812 if hasbinary and countwidth < 3:
1812 if hasbinary and countwidth < 3:
1813 countwidth = 3
1813 countwidth = 3
1814 graphwidth = width - countwidth - maxname - 6
1814 graphwidth = width - countwidth - maxname - 6
1815 if graphwidth < 10:
1815 if graphwidth < 10:
1816 graphwidth = 10
1816 graphwidth = 10
1817
1817
1818 def scale(i):
1818 def scale(i):
1819 if maxtotal <= graphwidth:
1819 if maxtotal <= graphwidth:
1820 return i
1820 return i
1821 # If diffstat runs out of room it doesn't print anything,
1821 # If diffstat runs out of room it doesn't print anything,
1822 # which isn't very useful, so always print at least one + or -
1822 # which isn't very useful, so always print at least one + or -
1823 # if there were at least some changes.
1823 # if there were at least some changes.
1824 return max(i * graphwidth // maxtotal, int(bool(i)))
1824 return max(i * graphwidth // maxtotal, int(bool(i)))
1825
1825
1826 for filename, adds, removes, isbinary in stats:
1826 for filename, adds, removes, isbinary in stats:
1827 if isbinary:
1827 if isbinary:
1828 count = 'Bin'
1828 count = 'Bin'
1829 else:
1829 else:
1830 count = adds + removes
1830 count = adds + removes
1831 pluses = '+' * scale(adds)
1831 pluses = '+' * scale(adds)
1832 minuses = '-' * scale(removes)
1832 minuses = '-' * scale(removes)
1833 output.append(' %s%s | %*s %s%s\n' %
1833 output.append(' %s%s | %*s %s%s\n' %
1834 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1834 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1835 countwidth, count, pluses, minuses))
1835 countwidth, count, pluses, minuses))
1836
1836
1837 if stats:
1837 if stats:
1838 output.append(_(' %d files changed, %d insertions(+), '
1838 output.append(_(' %d files changed, %d insertions(+), '
1839 '%d deletions(-)\n')
1839 '%d deletions(-)\n')
1840 % (len(stats), totaladds, totalremoves))
1840 % (len(stats), totaladds, totalremoves))
1841
1841
1842 return ''.join(output)
1842 return ''.join(output)
1843
1843
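diffstat() renders those per-file tuples as the familiar histogram: one scaled +/- bar per file plus a summary line. A hedged sketch of the output shape (values made up; exact spacing depends on the width argument):

# Sketch only: render a tiny fake diff as a diffstat block.
from mercurial import patch

lines = [
    'diff -r 1e2f3a4b5c6d foo.c',
    '-old line',
    '+new line',
    '+another new line',
]
text = patch.diffstat(lines, width=60)
# text is roughly:
#  foo.c | 3 ++-
#  1 files changed, 2 insertions(+), 1 deletions(-)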
1844 def diffstatui(*args, **kw):
1844 def diffstatui(*args, **kw):
1845 '''like diffstat(), but yields 2-tuples of (output, label) for
1845 '''like diffstat(), but yields 2-tuples of (output, label) for
1846 ui.write()
1846 ui.write()
1847 '''
1847 '''
1848
1848
1849 for line in diffstat(*args, **kw).splitlines():
1849 for line in diffstat(*args, **kw).splitlines():
1850 if line and line[-1] in '+-':
1850 if line and line[-1] in '+-':
1851 name, graph = line.rsplit(' ', 1)
1851 name, graph = line.rsplit(' ', 1)
1852 yield (name + ' ', '')
1852 yield (name + ' ', '')
1853 m = re.search(r'\++', graph)
1853 m = re.search(r'\++', graph)
1854 if m:
1854 if m:
1855 yield (m.group(0), 'diffstat.inserted')
1855 yield (m.group(0), 'diffstat.inserted')
1856 m = re.search(r'-+', graph)
1856 m = re.search(r'-+', graph)
1857 if m:
1857 if m:
1858 yield (m.group(0), 'diffstat.deleted')
1858 yield (m.group(0), 'diffstat.deleted')
1859 else:
1859 else:
1860 yield (line, '')
1860 yield (line, '')
1861 yield ('\n', '')
1861 yield ('\n', '')