##// END OF EJS Templates
diff: move diffline to patch module...
Guillermo Pérez -
r17941:9a6e4d5d default
parent child Browse files
Show More
@@ -1,378 +1,365 b''
1 1 # mdiff.py - diff and patch routines for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import bdiff, mpatch, util
10 10 import re, struct, base85, zlib
11 11 from node import hex, nullid
12 12
def splitnewlines(text):
    """Break text into lines, splitting on '\n' only.

    Unlike str.splitlines, other line terminators ('\r', form feeds,
    ...) are left embedded in the lines.
    """
    # split() leaves a trailing empty piece when text ends with '\n';
    # re-attach a terminator to every piece, then repair the last one.
    result = [piece + '\n' for piece in text.split('\n')]
    if result:
        last = result[-1]
        if last == '\n':
            # text ended with a newline: drop the spurious empty line
            del result[-1]
        else:
            # text had no final newline: strip the one we added
            result[-1] = last[:-1]
    return result
22 22
class diffopts(object):
    '''context is the number of context lines
    text treats all files as text
    showfunc enables diff -p output
    git enables the git extended patch format
    nodates removes dates from diff headers
    ignorews ignores all whitespace changes in the diff
    ignorewsamount ignores changes in the amount of whitespace
    ignoreblanklines ignores changes whose lines are all blank
    upgrade generates git diffs to avoid data loss
    '''

    # default value for every supported option
    defaults = {
        'context': 3,
        'text': False,
        'showfunc': False,
        'git': False,
        'nodates': False,
        'ignorews': False,
        'ignorewsamount': False,
        'ignoreblanklines': False,
        'upgrade': False,
    }

    __slots__ = defaults.keys()

    def __init__(self, **opts):
        # every option not supplied (or passed as None) falls back to
        # its class-level default
        for name in self.__slots__:
            value = opts.get(name)
            if value is None:
                value = self.defaults[name]
            setattr(self, name, value)

        try:
            self.context = int(self.context)
        except ValueError:
            raise util.Abort(_('diff context lines count must be '
                               'an integer, not %r') % self.context)

    def copy(self, **kwargs):
        """Return a new diffopts with selected options overridden."""
        opts = dict((k, getattr(self, k)) for k in self.defaults)
        opts.update(kwargs)
        return diffopts(**opts)

defaultopts = diffopts()
68 68
def wsclean(opts, text, blank=True):
    """Normalize whitespace in text according to the diff options.

    With blank=False, blank-line squeezing is skipped even when
    opts.ignoreblanklines is set.
    """
    if opts.ignorews:
        text = bdiff.fixws(text, 1)
    elif opts.ignorewsamount:
        text = bdiff.fixws(text, 0)
    if not (blank and opts.ignoreblanklines):
        return text
    # collapse runs of blank lines and drop leading/trailing ones
    return re.sub('\n+', '\n', text).strip('\n')
77 77
def splitblock(base1, lines1, base2, lines2, opts):
    """Split two line blocks that match except for interwoven blank lines.

    Yields ([astart, aend, bstart, bend], type) pairs where type is '='
    for a run of matching non-blank lines and '~' for a run of blank
    lines; base1/base2 are added to the reported line offsets.
    """
    # 1 flags a line that survives whitespace cleanup, 0 a blank one
    flags1 = [int(bool(wsclean(opts, line))) for line in lines1]
    flags2 = [int(bool(wsclean(opts, line))) for line in lines2]
    s1, e1 = 0, len(flags1)
    s2, e2 = 0, len(flags2)
    while s1 < e1 or s2 < e2:
        i1, i2 = s1, s2
        if i1 >= e1 or flags1[i1] == 0 or i2 >= e2 or flags2[i2] == 0:
            # consume a run of blank lines on either side
            btype = '~'
            while i1 < e1 and flags1[i1] == 0:
                i1 += 1
            while i2 < e2 and flags2[i2] == 0:
                i2 += 1
        else:
            # consume a run of matching non-blank lines, in lockstep
            btype = '='
            while i1 < e1 and flags1[i1] == 1 and flags2[i2] == 1:
                i1 += 1
                i2 += 1
        yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
        s1, s2 = i1, i2
103 103
def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
    """Return (block, type) tuples, where block is an mdiff.blocks
    line entry. type is '=' for blocks matching exactly one another
    (bdiff blocks), '!' for non-matching blocks and '~' for blocks
    matching only after having filtered blank lines. If refine is True,
    then '~' blocks are refined and are only made of blank lines.
    line1 and line2 are text1 and text2 split with splitnewlines() if
    they are already available.
    """
    # NOTE(review): 'refine' is documented above but never read in this
    # body -- confirm whether callers still rely on it.
    if opts is None:
        opts = defaultopts
    if opts.ignorews or opts.ignorewsamount:
        # normalize whitespace before diffing; blank lines are kept
        # (blank=False) so line offsets still match the original texts
        text1 = wsclean(opts, text1, False)
        text2 = wsclean(opts, text2, False)
    diff = bdiff.blocks(text1, text2)
    for i, s1 in enumerate(diff):
        # The first match is special.
        # we've either found a match starting at line 0 or a match later
        # in the file. If it starts later, old and new below will both be
        # empty and we'll continue to the next match.
        if i > 0:
            s = diff[i - 1]
        else:
            s = [0, 0, 0, 0]
        # s spans the mismatching gap between the previous match and s1
        s = [s[1], s1[0], s[3], s1[2]]

        # bdiff sometimes gives huge matches past eof, this check eats them,
        # and deals with the special first match case described above
        if s[0] != s[1] or s[2] != s[3]:
            type = '!'
            if opts.ignoreblanklines:
                # split lazily: only needed to compare the gap contents
                if lines1 is None:
                    lines1 = splitnewlines(text1)
                if lines2 is None:
                    lines2 = splitnewlines(text2)
                # if both sides of the gap are equal after whitespace
                # cleanup, the gap differs only in blank lines
                old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
                new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
                if old == new:
                    type = '~'
            yield s, type
        yield s1, '='
145 145
def diffline(revs, a, b, opts):
    """Build the leading 'diff ...' header line for a file pair.

    In git mode the line names both files; otherwise it names the
    revisions (when given) and the single file a.
    """
    parts = ['diff']
    if opts.git:
        parts.append('--git')
        parts.append('a/%s' % a)
        parts.append('b/%s' % b)
    else:
        if revs:
            parts.append(' '.join(["-r %s" % rev for rev in revs]))
        parts.append(a)
    return ' '.join(parts) + '\n'
158
def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
    """Return a unified diff between texts a and b as a single string.

    ad/bd are the date strings used in the header lines, fn1/fn2 the
    file names. Returns "" when there is nothing to show.
    """
    def datetag(date, fn=None):
        # git diffs and nodates suppress dates; a bare tab is kept when
        # the filename contains spaces so the header stays parseable
        if not opts.git and not opts.nodates:
            return '\t%s\n' % date
        if fn and ' ' in fn:
            return '\t\n'
        return '\n'

    if not a and not b:
        return ""
    epoch = util.datestr((0, 0))

    fn1 = util.pconvert(fn1)
    fn2 = util.pconvert(fn2)

    if not opts.text and (util.binary(a) or util.binary(b)):
        if a and b and len(a) == len(b) and a == b:
            return ""
        l = ['Binary file %s has changed\n' % fn1]
    elif not a:
        # file added (or emptied): every line of b is an addition
        b = splitnewlines(b)
        if a is None:
            l1 = '--- /dev/null%s' % datetag(epoch)
        else:
            l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
        l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
        l3 = "@@ -0,0 +1,%d @@\n" % len(b)
        l = [l1, l2, l3] + ["+" + e for e in b]
    elif not b:
        # file removed (or emptied): every line of a is a deletion
        a = splitnewlines(a)
        l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
        if b is None:
            l2 = '+++ /dev/null%s' % datetag(epoch)
        else:
            l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
        l3 = "@@ -1,%d +0,0 @@\n" % len(a)
        l = [l1, l2, l3] + ["-" + e for e in a]
    else:
        al = splitnewlines(a)
        bl = splitnewlines(b)
        l = list(_unidiff(a, b, al, bl, opts=opts))
        if not l:
            return ""

        l.insert(0, "--- a/%s%s" % (fn1, datetag(ad, fn1)))
        l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd, fn2)))

    # annotate any line missing its terminator, per unified diff format
    for ln in xrange(len(l)):
        if l[ln][-1] != '\n':
            l[ln] += "\n\ No newline at end of file\n"

    return "".join(l)
211 198
# creates a headerless unified diff
# t1 and t2 are the text to be diffed
# l1 and l2 are the text broken up into lines
def _unidiff(t1, t2, l1, l2, opts=defaultopts):
    def contextend(l, len):
        """Return the end of the context window after line l."""
        ret = l + opts.context
        if ret > len:
            ret = len
        return ret

    def contextstart(l):
        """Return the start of the context window before line l."""
        ret = l - opts.context
        if ret < 0:
            return 0
        return ret

    # [last scanned position, last function name found]; a mutable list
    # so yieldhunk can update it across invocations
    lastfunc = [0, '']
    def yieldhunk(hunk):
        (astart, a2, bstart, b2, delta) = hunk
        aend = contextend(a2, len(l1))
        alen = aend - astart
        blen = b2 - bstart + aend - a2

        func = ""
        if opts.showfunc:
            lastpos, func = lastfunc
            # walk backwards from the start of the context up to the start of
            # the previous hunk context until we find a line starting with an
            # alphanumeric char.
            for i in xrange(astart - 1, lastpos - 1, -1):
                if l1[i][0].isalnum():
                    func = ' ' + l1[i].rstrip()[:40]
                    lastfunc[1] = func
                    break
            # by recording this hunk's starting point as the next place to
            # start looking for function lines, we avoid reading any line in
            # the file more than once.
            lastfunc[0] = astart

        # zero-length hunk ranges report their start line as one less
        if alen:
            astart += 1
        if blen:
            bstart += 1

        yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
                                           bstart, blen, func)
        for x in delta:
            yield x
        # trailing context lines following the change
        for x in xrange(a2, aend):
            yield ' ' + l1[x]

    # bdiff.blocks gives us the matching sequences in the files. The loop
    # below finds the spaces between those matching sequences and translates
    # them into diff output.
    #
    hunk = None
    ignoredlines = 0
    for s, stype in allblocks(t1, t2, opts, l1, l2):
        a1, a2, b1, b2 = s
        if stype != '!':
            if stype == '~':
                # The diff context lines are based on t1 content. When
                # blank lines are ignored, the new lines offsets must
                # be adjusted as if equivalent blocks ('~') had the
                # same sizes on both sides.
                ignoredlines += (b2 - b1) - (a2 - a1)
            continue
        delta = []
        old = l1[a1:a2]
        new = l2[b1:b2]

        b1 -= ignoredlines
        b2 -= ignoredlines
        astart = contextstart(a1)
        bstart = contextstart(b1)
        prev = None
        if hunk:
            # join with the previous hunk if it falls inside the context
            if astart < hunk[1] + opts.context + 1:
                prev = hunk
                astart = hunk[1]
                bstart = hunk[3]
            else:
                for x in yieldhunk(hunk):
                    yield x
        if prev:
            # we've joined the previous hunk, record the new ending points.
            hunk[1] = a2
            hunk[3] = b2
            delta = hunk[4]
        else:
            # create a new hunk
            hunk = [astart, a2, bstart, b2, delta]

        # leading context, then the removed and added lines of this change
        delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
        delta[len(delta):] = ['-' + x for x in old]
        delta[len(delta):] = ['+' + x for x in new]

    # flush the final pending hunk, if any
    if hunk:
        for x in yieldhunk(hunk):
            yield x
314 301
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 of "blob <len>\0" + content; nullid when empty
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # length prefix: 'A'-'Z' encode 1..26 bytes, 'a'-'z' encode 27..52
        n = len(line)
        if n <= 26:
            prefix = chr(ord('A') + n - 1)
        else:
            prefix = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (prefix, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # yield successive csize-byte slices of text
        for start in xrange(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        return ""

    # TODO: deltas
    out = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for piece in chunk(zlib.compress(tn)):
        out.append(fmtline(piece))
    out.append('\n')
    return ''.join(out)
352 339
def patchtext(bin):
    """Return the concatenated inserted text fragments of a binary delta.

    The delta is a sequence of fragments, each a 12-byte >lll header
    (start, end, length) followed by length bytes of replacement text.
    """
    pos = 0
    pieces = []
    while pos < len(bin):
        p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
        pos += 12
        pieces.append(bin[pos:pos + l])
        pos += l
    return "".join(pieces)
362 349
def patch(a, bin):
    """Apply binary delta bin to text a and return the patched content."""
    if len(a) != 0:
        return mpatch.patches(a, [bin])
    # empty source: the result is just the delta payload past the
    # trivial 12-byte header (see trivialdiffheader)
    return util.buffer(bin, 12)
368 355
# similar to difflib.SequenceMatcher.get_matching_blocks
def get_matching_blocks(a, b):
    # convert bdiff's [a1, a2, b1, b2] blocks to (a1, b1, size) tuples
    matches = []
    for d in bdiff.blocks(a, b):
        a1, a2, b1 = d[0], d[1], d[2]
        matches.append((a1, b1, a2 - a1))
    return matches
372 359
def trivialdiffheader(length):
    # delta header (start=0, end=0, newlen=length) describing a patch
    # that replaces an empty file; see patch() above for the consumer
    return struct.pack(">lll", 0, 0, length)
375 362
# re-export the C implementations under their public names
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
@@ -1,1861 +1,1874 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib, shutil
11 11
12 12 from i18n import _
13 13 from node import hex, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 15 import context
16 16
17 17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 18
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied cleanly."""
    pass
21 21
22 22
23 23 # public functions
24 24
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # does this line look like an RFC822-style 'Key: value' header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines as a file-like patch chunk
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers at header boundaries
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split on mbox 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email machinery pull patches out of (multipart) messages
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block starts after non-header lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: the whole input is one plain patch
        yield chunk(cur)

    class fiter(object):
        """Iterator wrapper for file objects that only provide readline."""
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the stream until one format is recognized, then hand the
    # already-consumed lines plus the rest of the stream to its splitter
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
151 151
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # git-send-email puts the patch after a '---' separator line
        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            # strip a leading '[PATCH n/m]' style tag from the subject
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                # everything before the diff is (candidate) commit message;
                # '# ...' hg export headers override the email metadata
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---' and gitsendmail:
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        # no patch found: discard the temp file, keep the message
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
262 262
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a git file mode into the (islink, isexec) flag pair
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def copy(self):
        """Return an independent copy of this metadata."""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does afile (the '--- a/...' side of a hunk) refer to this entry?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does bfile (the '+++ b/...' side of a hunk) refer to this entry?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True if a hunk's a/b file pair targets this file."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
308 308
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            # a new file entry begins; flush the previous one, if any
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                # group(2) is the destination ('b/' side) path
                gp = patchmeta(m.group(2))
            continue
        if not gp:
            continue
        if line.startswith('--- '):
            # diff body starts: metadata for this file is complete
            gitpatches.append(gp)
            gp = None
            continue
        if line.startswith('rename from '):
            gp.op = 'RENAME'
            gp.oldpath = line[12:]
        elif line.startswith('rename to '):
            gp.path = line[10:]
        elif line.startswith('copy from '):
            gp.op = 'COPY'
            gp.oldpath = line[10:]
        elif line.startswith('copy to '):
            gp.path = line[8:]
        elif line.startswith('deleted file'):
            gp.op = 'DELETE'
        elif line.startswith('new file mode '):
            gp.op = 'ADD'
            gp.setmode(int(line[-6:], 8))
        elif line.startswith('new mode '):
            gp.setmode(int(line[-6:], 8))
        elif line.startswith('GIT binary patch'):
            gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
352 352
class linereader(object):
    """File-like wrapper allowing lines to be pushed back onto the stream."""

    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, returned in FIFO order before reading fp
        self.buf = []

    def push(self, line):
        # queue a line for the next readline() call; None is ignored
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        while True:
            line = self.readline()
            if not line:
                break
            yield line
376 376
class abstractbackend(object):
    """Interface for the destinations a patch can be applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError
408 408
class fsbackend(abstractbackend):
    """Patch backend reading and writing files under a base directory."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of f under the backend base directory
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        path = self._join(fname)
        if os.path.islink(path):
            # a symlink's "data" is its target; links are never executable
            return (os.readlink(path), (True, False))
        isexec = False
        try:
            isexec = os.lstat(path).st_mode & 0100 != 0
        except OSError, e:
            # a missing file will be reported by opener.read() below
            if e.errno != errno.ENOENT:
                raise
        return (self.opener.read(fname), (False, isexec))

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change: leave the content untouched
            util.setflags(self._join(fname), islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                util.setflags(self._join(fname), False, True)

    def unlink(self, fname):
        try:
            util.unlinkpath(self._join(fname))
        except OSError, inst:
            # unlinking an already-missing file is not an error
            if inst.errno != errno.ENOENT:
                raise

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return os.path.lexists(self._join(fname))
459 459
class workingbackend(fsbackend):
    """fsbackend that also records changes (copies, adds, removes) in
    the working directory's dirstate."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold passed through to scmutil.addremove()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch a file that exists on disk but is untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush recorded changes into the dirstate; return changed files."""
        wctx = self.repo[None]
        addremoved = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # addremove().
                    addremoved.discard(f)
        if addremoved:
            cwd = self.repo.getcwd()
            if cwd:
                addremoved = [util.pathto(self.repo.root, cwd, f)
                              for f in addremoved]
            scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
        return sorted(self.changed)
507 507
class filestore(object):
    """Store patched file contents, keeping them in memory up to
    maxsize bytes in total and spilling further files to a temporary
    directory on disk."""

    def __init__(self, maxsize=None):
        self.opener = None      # lazily-created opener for spilled files
        self.files = {}         # fname -> (tempname, mode, copied) on disk
        self.created = 0        # counter providing simple temp file names
        if maxsize is None:
            maxsize = 4 * (2 ** 20)
        self.maxsize = maxsize
        self.size = 0           # bytes currently held in memory
        self.data = {}          # fname -> (data, mode, copied) in memory

    def setfile(self, fname, data, mode, copied=None):
        # negative maxsize means "never spill to disk"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = scmutil.opener(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.created += 1
        self.opener.write(fn, data)
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            raise IOError
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
544 544
class repobackend(abstractbackend):
    """Backend applying patches against a repository changectx,
    accumulating the patched contents in a filestore."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx          # changectx the patch is applied against
        self.store = store      # filestore collecting patched content
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # callers expect IOError for a missing file
            raise IOError
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by the patch."""
        return self.changed | self.removed
586 586
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range lines: --- start,len ---- or *** start,len ****
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported line-ending normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
591 591
class patchfile(object):
    """A file being patched: holds its lines and applies hunks to them.

    Content and metadata come either from the backend (plain patch) or
    from the file store (git copy/rename source). Hunks that cannot be
    placed are collected in self.rej and written out by write_rej().
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copy/rename: content comes from the copy source in
                # the store, but existence is checked on the target
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}        # line content -> list of line numbers
        self.dirty = 0
        self.offset = 0       # cumulative line delta of applied hunks
        self.skew = 0         # offset of the last inexact hunk match
        self.rej = []         # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to the backend, restoring EOLs per self.eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit 'patching file X' once; as a warning when warn is set,
        as a note otherwise."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h, trying offsets and fuzz when the exact position
        does not match.

        Returns 0 on a clean apply, the fuzz amount on a fuzzy apply
        and -1 when the hunk is rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # fix: '%' used to be applied inside _(), so the
                # formatted string (not the msgid) was looked up for
                # translation and could never match the catalog
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush applied changes, write rejects, return reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
806 806
class hunk(object):
    """One hunk of a text patch, parsed from unified or context format.

    starta/lena describe the old side and startb/lenb the new side.
    In hunks parsed here, self.a keeps its '-'/' ' prefixes while
    self.b stores bare lines; self.hunk holds the raw hunk text
    starting with the '@@ ...' description line.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the hunk header line; num: 1-based hunk number
        # lr: linereader to consume, or None to build an empty shell
        # context: true to parse context format, false for unified
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse '@@ -start,len +start,len @@' plus the hunk body."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # an omitted length defaults to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk ('*** n,m ****' / '--- n,m ----')
        and rebuild it in place as a unified hunk."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side lines: '- ' and '! ' become '-', '  ' becomes ' '
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the bogus trailing \n
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-side lines: '+ ' and '! ' become '+', '  ' becomes ' '
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says 'old text' although this
                # loop parses the new side of the hunk
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the rebuilt '+'/' ' line into self.hunk at the
            # right spot relative to the old-side lines already there
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # handle a trailing '\ No newline at end of file' marker
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # all declared lines were actually parsed
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of the
        # old/new line lists. It checks the hunk to make sure only
        # context lines are removed, and then returns the shortened
        # lists of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old)-bot], new[top:len(new)-bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to `fuzz`
        context lines trimmed and starts converted to 0-based."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1023 1023
class binhunk(object):
    'A binary patch file. Only understands literals so far.'
    def __init__(self, lr, fname):
        # decoded replacement content, set by _read()
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # complete once the literal payload has been decoded
        return self.text is not None

    def new(self):
        # full replacement content, as a one-element "line" list
        return [self.text]

    def _read(self, lr):
        """Skip to the 'literal <size>' header, then base85-decode and
        zlib-decompress the payload, verifying the declared length."""
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it
            # without the trailing EOL
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                break
        size = int(line[8:].rstrip())
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte length of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1071 1071
def parsefilename(str):
    """Extract the file name from a '--- ' / '+++ ' patch header line.

    Everything after the four-character prefix up to the first tab (or,
    failing that, the first space) is the name; trailing CR/LF is
    stripped first.
    """
    name = str[4:].rstrip('\r\n')
    # tab takes precedence over space as the name/metadata separator
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1081 1081
def pathstrip(path, strip):
    """Split path after its first `strip` components.

    Returns (stripped-prefix, remainder); runs of '/' count as a single
    separator. Raises PatchError when path has fewer than `strip`
    components.
    """
    if strip == 0:
        return '', path.rstrip()
    plen = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < plen - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
1099 1099
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta describing the target of a plain (non-git) hunk.

    Decides which of the stripped a/b paths names the file to patch and
    whether the hunk creates or deletes it. Raises PatchError when
    neither side yields a usable path.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # an add/delete hunk carries a 0,0 range on the null side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            # conditional expression instead of the old 'and/or' idiom,
            # which silently picked bfile whenever afile was falsy
            fname = afile if isbackup else bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = afile if isbackup else bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1148 1148
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as-is: the renamed 'a' would be
    gone already, and copying from 'b' would pick up its changes. So
    the whole patch is scanned up front for copy and rename commands,
    letting the caller perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable source: slurp everything into a rewindable buffer
        fp = cStringIO.StringIO(lr.fp.read())
    scanner = linereader(fp)
    scanner.push(firstline)
    patches = readgitpatch(scanner)
    # rewind so the hunk parser re-reads what we just scanned
    fp.seek(pos)
    return patches
1174 1174
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor unknown yet, True: context, False: unified
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                # this hunk belongs to the pending git metadata entry
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1273 1273
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Read a patch from fp and try to apply it through backend.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With eolmode 'strict' both the patch content and the patched file
    are handled in binary mode; any other mode ignores line endings
    while matching and normalizes them afterwards per eolmode.
    """
    # delegate to the generic driver, applying text hunks via patchfile
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      eolmode=eolmode)
1286 1286
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    """Parse the patch in fp with iterhunks() and apply each hunk.

    patcher is the patchfile-like class used to apply text hunks.
    Returns 0 on a clean apply, 1 if any hunk applied with fuzz and
    -1 if any hunk was rejected.
    """

    def pstrip(p):
        # strip-1: git metadata paths carry no 'a/'/'b/' prefix
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the 'file' event for this hunk failed; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                # finish the previous file before switching
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (git): no text hunks follow
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1368 1368
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    `files` is updated in place with the paths the patch program
    reports as patched; addremove is run over them afterwards.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the patch program's output for per-file status
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): pf/printed_file are unbound if fuzz or
                # FAILED output precedes any 'patching file' line —
                # assumes patch(1) always prints the file line first
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1415 1415
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply patchobj (a path or an open file object) through backend.

    files, when given, is updated with the set of touched paths.
    Returns True when the patch applied with fuzz, False on a clean
    apply; raises PatchError if any hunk was rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path name: assume an already-open file object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        # only close fp if we opened it ourselves above
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1441 1441
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """Apply patchobj to the working directory with the builtin engine.

    Returns whether the patch applied with any fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, files, eolmode)
1448 1448
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply patchobj using a repobackend built from ctx and store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, files, eolmode)
1453 1453
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changectx whose file contents come from store."""
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)

    def getfilectx(repo, memctx, path):
        # pull content and metadata for path out of the file store
        data, (islink, isexec), copied = store.getfile(path)
        return context.memfilectx(path, data, islink=islink, isexec=isexec,
                                  copied=copied)

    ctx = context.memctx(repo, parents, text, files, getfilectx, user,
                         date, extra)
    if editor:
        # let the caller edit the commit message interactively
        ctx._text = editor(repo, ctx, [])
    return ctx
1468 1468
1469 1469 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1470 1470 similarity=0):
1471 1471 """Apply <patchname> to the working directory.
1472 1472
1473 1473 'eolmode' specifies how end of lines should be handled. It can be:
1474 1474 - 'strict': inputs are read in binary mode, EOLs are preserved
1475 1475 - 'crlf': EOLs are ignored when patching and reset to CRLF
1476 1476 - 'lf': EOLs are ignored when patching and reset to LF
1477 1477 - None: get it from user settings, default to 'strict'
1478 1478 'eolmode' is ignored when using an external patcher program.
1479 1479
1480 1480 Returns whether patch was applied with fuzz factor.
1481 1481 """
1482 1482 patcher = ui.config('ui', 'patch')
1483 1483 if files is None:
1484 1484 files = set()
1485 1485 try:
1486 1486 if patcher:
1487 1487 return _externalpatch(ui, repo, patcher, patchname, strip,
1488 1488 files, similarity)
1489 1489 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1490 1490 similarity)
1491 1491 except PatchError, err:
1492 1492 raise util.Abort(str(err))
1493 1493
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at patchpath
    (rename sources included)."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        touched = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, firsthunk, gp = values
                if gp:
                    # git metadata carries its own paths; strip them
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, firsthunk,
                                       strip)
                touched.add(gp.path)
                if gp.op == 'RENAME':
                    # a rename touches its source as well
                    touched.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return touched
    finally:
        fp.close()
1516 1516
class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git patch format."""
    pass
1519 1519
def diffopts(ui, opts=None, untrusted=False, section='diff'):
    """Build an mdiff.diffopts from command options and configuration.

    Explicit values in opts win; otherwise each value is read from the
    given config section, honoring untrusted sources when requested.
    """
    def lookup(key, name=None, getter=ui.configbool):
        # command-line option first, then the configuration file
        return ((opts and opts.get(key)) or
                getter(section, name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=lookup('git'),
        nodates=lookup('nodates'),
        showfunc=lookup('show_function', 'showfunc'),
        ignorews=lookup('ignore_all_space', 'ignorews'),
        ignorewsamount=lookup('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=lookup('ignore_blank_lines', 'ignoreblanklines'),
        context=lookup('unified', getter=ui.config))
1533 1533
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # wrap ctx.filectx() with a small LRU cache of filelogs (~20
        # entries) so repeated lookups of the same file reuse its filelog
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most recently used position
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # revision hashes for the "diff -r ..." header lines; suppressed in
    # quiet mode (short hashes normally, full hex with --debug)
    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    # copy/rename data is only needed when emitting git diffs or when we
    # might have to upgrade to them
    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change cannot be represented in plain format;
            # regenerate the whole diff in git format instead
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1609 1609
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headerlabels = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    bodylabels = [('@', 'diff.hunk'),
                  ('-', 'diff.deleted'),
                  ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        for n, rawline in enumerate(chunk.split('\n')):
            if n:
                # re-emit the newline the split consumed, unlabeled
                yield ('\n', '')
            # track whether we are inside a file header: a header starts at
            # any line not beginning with a hunk/body character and ends at
            # the first hunk ('@') line
            if inheader:
                if rawline.startswith('@'):
                    inheader = False
            elif rawline and rawline[0] not in ' +-@\\':
                inheader = True
            shown = rawline
            if not inheader and rawline and rawline[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                shown = rawline.rstrip()
            table = headerlabels if inheader else bodylabels
            for prefix, label in table:
                if shown.startswith(prefix):
                    yield (shown, label)
                    break
            else:
                yield (rawline, '')
            if rawline != shown:
                yield (rawline[len(shown):], 'diff.trailingwhitespace')
1650 1650
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # all arguments are forwarded to diff(); difflabel() does the labeling
    return difflabel(diff, *args, **kw)
1654 1654
1655 1655
1656 1656 def _addmodehdr(header, omode, nmode):
1657 1657 if omode != nmode:
1658 1658 header.append('old mode %s\n' % omode)
1659 1659 header.append('new mode %s\n' % nmode)
1660 1660
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''generate a diff between ctx1 and ctx2, yielding it in text blocks

    Yields the git extended header (when applicable) and the unified or
    base85 binary diff text for each changed file. Whenever plain (non-git)
    output would lose information (flags, binary data, copies/renames,
    empty-file creation or deletion), losedatafn is called with the
    filename; it may raise to abort (see diff() above).
    '''

    def join(f):
        # apply the display prefix (used for subrepos)
        return os.path.join(prefix, f)

    def diffline(revs, a, b, opts):
        # build the leading "diff ..." header line:
        # "diff --git a/FILE b/FILE" in git mode, "diff -r REV... FILE"
        # otherwise
        parts = ['diff']
        if opts.git:
            parts.append('--git')
        if revs and not opts.git:
            parts.append(' '.join(["-r %s" % rev for rev in revs]))
        if opts.git:
            parts.append('a/%s' % a)
            parts.append('b/%s' % b)
        else:
            parts.append(a)
        return ' '.join(parts) + '\n'

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # sources already reported as renamed, to avoid reporting them twice
    gone = set()
    # manifest flag -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse copy map: destination -> source becomes source -> destination
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        # git diffs carry no -r revision arguments in the header line
        revs = None

    for f in sorted(modified + added + removed):
        to = None       # old file contents
        tn = None       # new file contents
        dodiff = True   # True, False, or 'binary'
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # a rename is a copy whose source disappeared
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        # diff against the copy source, not an empty file
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # modified file: check for flag changes and binary content
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)

        if dodiff:
            if opts.git or revs:
                header.insert(0, diffline(revs, join(a), join(b), opts))
            if dodiff == 'binary':
                text = mdiff.b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), opts=opts)
            # emit the header only when there is a body or more than the
            # bare diffline (e.g. a mode-only change)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1764 1777
def diffstatsum(stats):
    '''reduce per-file diffstat data to whole-diff totals

    stats is an iterable of (filename, adds, removes, isbinary) tuples;
    returns (maxfile, maxtotal, addtotal, removetotal, binary) where
    maxfile is the widest filename (display columns) and maxtotal the
    largest per-file change count.
    '''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        if adds + removes > maxtotal:
            maxtotal = adds + removes
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
1775 1788
def diffstatdata(lines):
    '''parse diff text lines into (filename, adds, removes, isbinary) tuples'''
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename = None
    adds = removes = 0
    isbinary = False

    for line in lines:
        if line.startswith('diff'):
            # flush the previous file's counters before starting a new file
            if filename:
                results.append((filename, adds, removes, isbinary))
            adds = removes = 0
            isbinary = False
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the final file, if any
    if filename:
        results.append((filename, adds, removes, isbinary))
    return results
1805 1818
def diffstat(lines, width=80, git=False):
    '''render a diffstat table (per-file +/- histogram and totals) for the
    given diff text lines; width bounds the whole display width'''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # 'Bin' needs three columns
        countwidth = max(countwidth, 3)
    # leave room for name, count and decorations, but keep a minimum graph
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding, countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1843 1856
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if not row or row[-1] not in '+-':
            # summary or empty line: no histogram to colorize
            yield (row, '')
        else:
            # split the trailing +/- histogram from the name/count part
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now