with: use context manager for I/O in changedfiles in patch
Bryan O'Sullivan
r27796:f7f3958d default
@@ -1,2578 +1,2575 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import cStringIO
11 import cStringIO
12 import collections
12 import collections
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 scmutil,
36 scmutil,
37 util,
37 util,
38 )
38 )
39
39
40 gitre = re.compile('diff --git a/(.*) b/(.*)')
40 gitre = re.compile('diff --git a/(.*) b/(.*)')
41 tabsplitter = re.compile(r'(\t+|[^\t]+)')
41 tabsplitter = re.compile(r'(\t+|[^\t]+)')
42
42
43 class PatchError(Exception):
43 class PatchError(Exception):
44 pass
44 pass
45
45
46
46
47 # public functions
47 # public functions
48
48
49 def split(stream):
49 def split(stream):
50 '''return an iterator of individual patches from a stream'''
50 '''return an iterator of individual patches from a stream'''
51 def isheader(line, inheader):
51 def isheader(line, inheader):
52 if inheader and line[0] in (' ', '\t'):
52 if inheader and line[0] in (' ', '\t'):
53 # continuation
53 # continuation
54 return True
54 return True
55 if line[0] in (' ', '-', '+'):
55 if line[0] in (' ', '-', '+'):
56 # diff line - don't check for header pattern in there
56 # diff line - don't check for header pattern in there
57 return False
57 return False
58 l = line.split(': ', 1)
58 l = line.split(': ', 1)
59 return len(l) == 2 and ' ' not in l[0]
59 return len(l) == 2 and ' ' not in l[0]
60
60
61 def chunk(lines):
61 def chunk(lines):
62 return cStringIO.StringIO(''.join(lines))
62 return cStringIO.StringIO(''.join(lines))
63
63
64 def hgsplit(stream, cur):
64 def hgsplit(stream, cur):
65 inheader = True
65 inheader = True
66
66
67 for line in stream:
67 for line in stream:
68 if not line.strip():
68 if not line.strip():
69 inheader = False
69 inheader = False
70 if not inheader and line.startswith('# HG changeset patch'):
70 if not inheader and line.startswith('# HG changeset patch'):
71 yield chunk(cur)
71 yield chunk(cur)
72 cur = []
72 cur = []
73 inheader = True
73 inheader = True
74
74
75 cur.append(line)
75 cur.append(line)
76
76
77 if cur:
77 if cur:
78 yield chunk(cur)
78 yield chunk(cur)
79
79
80 def mboxsplit(stream, cur):
80 def mboxsplit(stream, cur):
81 for line in stream:
81 for line in stream:
82 if line.startswith('From '):
82 if line.startswith('From '):
83 for c in split(chunk(cur[1:])):
83 for c in split(chunk(cur[1:])):
84 yield c
84 yield c
85 cur = []
85 cur = []
86
86
87 cur.append(line)
87 cur.append(line)
88
88
89 if cur:
89 if cur:
90 for c in split(chunk(cur[1:])):
90 for c in split(chunk(cur[1:])):
91 yield c
91 yield c
92
92
93 def mimesplit(stream, cur):
93 def mimesplit(stream, cur):
94 def msgfp(m):
94 def msgfp(m):
95 fp = cStringIO.StringIO()
95 fp = cStringIO.StringIO()
96 g = email.Generator.Generator(fp, mangle_from_=False)
96 g = email.Generator.Generator(fp, mangle_from_=False)
97 g.flatten(m)
97 g.flatten(m)
98 fp.seek(0)
98 fp.seek(0)
99 return fp
99 return fp
100
100
101 for line in stream:
101 for line in stream:
102 cur.append(line)
102 cur.append(line)
103 c = chunk(cur)
103 c = chunk(cur)
104
104
105 m = email.Parser.Parser().parse(c)
105 m = email.Parser.Parser().parse(c)
106 if not m.is_multipart():
106 if not m.is_multipart():
107 yield msgfp(m)
107 yield msgfp(m)
108 else:
108 else:
109 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
109 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
110 for part in m.walk():
110 for part in m.walk():
111 ct = part.get_content_type()
111 ct = part.get_content_type()
112 if ct not in ok_types:
112 if ct not in ok_types:
113 continue
113 continue
114 yield msgfp(part)
114 yield msgfp(part)
115
115
116 def headersplit(stream, cur):
116 def headersplit(stream, cur):
117 inheader = False
117 inheader = False
118
118
119 for line in stream:
119 for line in stream:
120 if not inheader and isheader(line, inheader):
120 if not inheader and isheader(line, inheader):
121 yield chunk(cur)
121 yield chunk(cur)
122 cur = []
122 cur = []
123 inheader = True
123 inheader = True
124 if inheader and not isheader(line, inheader):
124 if inheader and not isheader(line, inheader):
125 inheader = False
125 inheader = False
126
126
127 cur.append(line)
127 cur.append(line)
128
128
129 if cur:
129 if cur:
130 yield chunk(cur)
130 yield chunk(cur)
131
131
132 def remainder(cur):
132 def remainder(cur):
133 yield chunk(cur)
133 yield chunk(cur)
134
134
135 class fiter(object):
135 class fiter(object):
136 def __init__(self, fp):
136 def __init__(self, fp):
137 self.fp = fp
137 self.fp = fp
138
138
139 def __iter__(self):
139 def __iter__(self):
140 return self
140 return self
141
141
142 def next(self):
142 def next(self):
143 l = self.fp.readline()
143 l = self.fp.readline()
144 if not l:
144 if not l:
145 raise StopIteration
145 raise StopIteration
146 return l
146 return l
147
147
148 inheader = False
148 inheader = False
149 cur = []
149 cur = []
150
150
151 mimeheaders = ['content-type']
151 mimeheaders = ['content-type']
152
152
153 if not util.safehasattr(stream, 'next'):
153 if not util.safehasattr(stream, 'next'):
154 # http responses, for example, have readline but not next
154 # http responses, for example, have readline but not next
155 stream = fiter(stream)
155 stream = fiter(stream)
156
156
157 for line in stream:
157 for line in stream:
158 cur.append(line)
158 cur.append(line)
159 if line.startswith('# HG changeset patch'):
159 if line.startswith('# HG changeset patch'):
160 return hgsplit(stream, cur)
160 return hgsplit(stream, cur)
161 elif line.startswith('From '):
161 elif line.startswith('From '):
162 return mboxsplit(stream, cur)
162 return mboxsplit(stream, cur)
163 elif isheader(line, inheader):
163 elif isheader(line, inheader):
164 inheader = True
164 inheader = True
165 if line.split(':', 1)[0].lower() in mimeheaders:
165 if line.split(':', 1)[0].lower() in mimeheaders:
166 # let email parser handle this
166 # let email parser handle this
167 return mimesplit(stream, cur)
167 return mimesplit(stream, cur)
168 elif line.startswith('--- ') and inheader:
168 elif line.startswith('--- ') and inheader:
169 # No evil headers seen by diff start, split by hand
169 # No evil headers seen by diff start, split by hand
170 return headersplit(stream, cur)
170 return headersplit(stream, cur)
171 # Not enough info, keep reading
171 # Not enough info, keep reading
172
172
173 # if we are here, we have a very plain patch
173 # if we are here, we have a very plain patch
174 return remainder(cur)
174 return remainder(cur)
175
175
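# Illustrative usage sketch, not part of the original patch.py: a hedged
# example of driving split() above.  The helper name is made up for the
# example; 'fp' is assumed to be any file-like object (or line iterator)
# holding one or more concatenated patches.
def _example_split_usage(fp):
    # split() yields one cStringIO.StringIO per detected patch, whether the
    # input is hg export output, an mbox, a MIME message or a bare diff.
    return [piece.read() for piece in split(fp)]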
176 ## Some facility for extensible patch parsing:
176 ## Some facility for extensible patch parsing:
177 # list of pairs ("header to match", "data key")
177 # list of pairs ("header to match", "data key")
178 patchheadermap = [('Date', 'date'),
178 patchheadermap = [('Date', 'date'),
179 ('Branch', 'branch'),
179 ('Branch', 'branch'),
180 ('Node ID', 'nodeid'),
180 ('Node ID', 'nodeid'),
181 ]
181 ]
182
182
183 def extract(ui, fileobj):
183 def extract(ui, fileobj):
184 '''extract patch from data read from fileobj.
184 '''extract patch from data read from fileobj.
185
185
186 patch can be a normal patch or contained in an email message.
186 patch can be a normal patch or contained in an email message.
187
187
188 return a dictionary. Standard keys are:
188 return a dictionary. Standard keys are:
189 - filename,
189 - filename,
190 - message,
190 - message,
191 - user,
191 - user,
192 - date,
192 - date,
193 - branch,
193 - branch,
194 - node,
194 - node,
195 - p1,
195 - p1,
196 - p2.
196 - p2.
197 Any item can be missing from the dictionary. If filename is missing,
197 Any item can be missing from the dictionary. If filename is missing,
198 fileobj did not contain a patch. Caller must unlink filename when done.'''
198 fileobj did not contain a patch. Caller must unlink filename when done.'''
199
199
200 # attempt to detect the start of a patch
200 # attempt to detect the start of a patch
201 # (this heuristic is borrowed from quilt)
201 # (this heuristic is borrowed from quilt)
202 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
202 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
203 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
203 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
204 r'---[ \t].*?^\+\+\+[ \t]|'
204 r'---[ \t].*?^\+\+\+[ \t]|'
205 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
205 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
206
206
207 data = {}
207 data = {}
208 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
208 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
209 tmpfp = os.fdopen(fd, 'w')
209 tmpfp = os.fdopen(fd, 'w')
210 try:
210 try:
211 msg = email.Parser.Parser().parse(fileobj)
211 msg = email.Parser.Parser().parse(fileobj)
212
212
213 subject = msg['Subject']
213 subject = msg['Subject']
214 data['user'] = msg['From']
214 data['user'] = msg['From']
215 if not subject and not data['user']:
215 if not subject and not data['user']:
216 # Not an email, restore parsed headers if any
216 # Not an email, restore parsed headers if any
217 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
217 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
218
218
219 # should try to parse msg['Date']
219 # should try to parse msg['Date']
220 parents = []
220 parents = []
221
221
222 if subject:
222 if subject:
223 if subject.startswith('[PATCH'):
223 if subject.startswith('[PATCH'):
224 pend = subject.find(']')
224 pend = subject.find(']')
225 if pend >= 0:
225 if pend >= 0:
226 subject = subject[pend + 1:].lstrip()
226 subject = subject[pend + 1:].lstrip()
227 subject = re.sub(r'\n[ \t]+', ' ', subject)
227 subject = re.sub(r'\n[ \t]+', ' ', subject)
228 ui.debug('Subject: %s\n' % subject)
228 ui.debug('Subject: %s\n' % subject)
229 if data['user']:
229 if data['user']:
230 ui.debug('From: %s\n' % data['user'])
230 ui.debug('From: %s\n' % data['user'])
231 diffs_seen = 0
231 diffs_seen = 0
232 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
232 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
233 message = ''
233 message = ''
234 for part in msg.walk():
234 for part in msg.walk():
235 content_type = part.get_content_type()
235 content_type = part.get_content_type()
236 ui.debug('Content-Type: %s\n' % content_type)
236 ui.debug('Content-Type: %s\n' % content_type)
237 if content_type not in ok_types:
237 if content_type not in ok_types:
238 continue
238 continue
239 payload = part.get_payload(decode=True)
239 payload = part.get_payload(decode=True)
240 m = diffre.search(payload)
240 m = diffre.search(payload)
241 if m:
241 if m:
242 hgpatch = False
242 hgpatch = False
243 hgpatchheader = False
243 hgpatchheader = False
244 ignoretext = False
244 ignoretext = False
245
245
246 ui.debug('found patch at byte %d\n' % m.start(0))
246 ui.debug('found patch at byte %d\n' % m.start(0))
247 diffs_seen += 1
247 diffs_seen += 1
248 cfp = cStringIO.StringIO()
248 cfp = cStringIO.StringIO()
249 for line in payload[:m.start(0)].splitlines():
249 for line in payload[:m.start(0)].splitlines():
250 if line.startswith('# HG changeset patch') and not hgpatch:
250 if line.startswith('# HG changeset patch') and not hgpatch:
251 ui.debug('patch generated by hg export\n')
251 ui.debug('patch generated by hg export\n')
252 hgpatch = True
252 hgpatch = True
253 hgpatchheader = True
253 hgpatchheader = True
254 # drop earlier commit message content
254 # drop earlier commit message content
255 cfp.seek(0)
255 cfp.seek(0)
256 cfp.truncate()
256 cfp.truncate()
257 subject = None
257 subject = None
258 elif hgpatchheader:
258 elif hgpatchheader:
259 if line.startswith('# User '):
259 if line.startswith('# User '):
260 data['user'] = line[7:]
260 data['user'] = line[7:]
261 ui.debug('From: %s\n' % data['user'])
261 ui.debug('From: %s\n' % data['user'])
262 elif line.startswith("# Parent "):
262 elif line.startswith("# Parent "):
263 parents.append(line[9:].lstrip())
263 parents.append(line[9:].lstrip())
264 elif line.startswith("# "):
264 elif line.startswith("# "):
265 for header, key in patchheadermap:
265 for header, key in patchheadermap:
266 prefix = '# %s ' % header
266 prefix = '# %s ' % header
267 if line.startswith(prefix):
267 if line.startswith(prefix):
268 data[key] = line[len(prefix):]
268 data[key] = line[len(prefix):]
269 else:
269 else:
270 hgpatchheader = False
270 hgpatchheader = False
271 elif line == '---':
271 elif line == '---':
272 ignoretext = True
272 ignoretext = True
273 if not hgpatchheader and not ignoretext:
273 if not hgpatchheader and not ignoretext:
274 cfp.write(line)
274 cfp.write(line)
275 cfp.write('\n')
275 cfp.write('\n')
276 message = cfp.getvalue()
276 message = cfp.getvalue()
277 if tmpfp:
277 if tmpfp:
278 tmpfp.write(payload)
278 tmpfp.write(payload)
279 if not payload.endswith('\n'):
279 if not payload.endswith('\n'):
280 tmpfp.write('\n')
280 tmpfp.write('\n')
281 elif not diffs_seen and message and content_type == 'text/plain':
281 elif not diffs_seen and message and content_type == 'text/plain':
282 message += '\n' + payload
282 message += '\n' + payload
283 except: # re-raises
283 except: # re-raises
284 tmpfp.close()
284 tmpfp.close()
285 os.unlink(tmpname)
285 os.unlink(tmpname)
286 raise
286 raise
287
287
288 if subject and not message.startswith(subject):
288 if subject and not message.startswith(subject):
289 message = '%s\n%s' % (subject, message)
289 message = '%s\n%s' % (subject, message)
290 data['message'] = message
290 data['message'] = message
291 tmpfp.close()
291 tmpfp.close()
292 if parents:
292 if parents:
293 data['p1'] = parents.pop(0)
293 data['p1'] = parents.pop(0)
294 if parents:
294 if parents:
295 data['p2'] = parents.pop(0)
295 data['p2'] = parents.pop(0)
296
296
297 if diffs_seen:
297 if diffs_seen:
298 data['filename'] = tmpname
298 data['filename'] = tmpname
299 else:
299 else:
300 os.unlink(tmpname)
300 os.unlink(tmpname)
301 return data
301 return data
302
302
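# Illustrative usage sketch, not part of the original patch.py: a hedged
# example of calling extract() above and honouring its contract that the
# caller unlinks the temporary patch file.  'ui' is assumed to be a
# mercurial.ui.ui instance and 'fileobj' an open patch or email file.
def _example_extract_usage(ui, fileobj):
    data = extract(ui, fileobj)
    try:
        # 'filename' is only present when a diff was actually found
        if 'filename' in data:
            with open(data['filename']) as dfp:
                return data.get('message', ''), dfp.read()
        return data.get('message', ''), None
    finally:
        if 'filename' in data:
            os.unlink(data['filename'])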
303 class patchmeta(object):
303 class patchmeta(object):
304 """Patched file metadata
304 """Patched file metadata
305
305
306 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
306 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
307 or COPY. 'path' is patched file path. 'oldpath' is set to the
307 or COPY. 'path' is patched file path. 'oldpath' is set to the
308 origin file when 'op' is either COPY or RENAME, None otherwise. If
308 origin file when 'op' is either COPY or RENAME, None otherwise. If
309 file mode is changed, 'mode' is a tuple (islink, isexec) where
309 file mode is changed, 'mode' is a tuple (islink, isexec) where
310 'islink' is True if the file is a symlink and 'isexec' is True if
310 'islink' is True if the file is a symlink and 'isexec' is True if
311 the file is executable. Otherwise, 'mode' is None.
311 the file is executable. Otherwise, 'mode' is None.
312 """
312 """
313 def __init__(self, path):
313 def __init__(self, path):
314 self.path = path
314 self.path = path
315 self.oldpath = None
315 self.oldpath = None
316 self.mode = None
316 self.mode = None
317 self.op = 'MODIFY'
317 self.op = 'MODIFY'
318 self.binary = False
318 self.binary = False
319
319
320 def setmode(self, mode):
320 def setmode(self, mode):
321 islink = mode & 0o20000
321 islink = mode & 0o20000
322 isexec = mode & 0o100
322 isexec = mode & 0o100
323 self.mode = (islink, isexec)
323 self.mode = (islink, isexec)
324
324
325 def copy(self):
325 def copy(self):
326 other = patchmeta(self.path)
326 other = patchmeta(self.path)
327 other.oldpath = self.oldpath
327 other.oldpath = self.oldpath
328 other.mode = self.mode
328 other.mode = self.mode
329 other.op = self.op
329 other.op = self.op
330 other.binary = self.binary
330 other.binary = self.binary
331 return other
331 return other
332
332
333 def _ispatchinga(self, afile):
333 def _ispatchinga(self, afile):
334 if afile == '/dev/null':
334 if afile == '/dev/null':
335 return self.op == 'ADD'
335 return self.op == 'ADD'
336 return afile == 'a/' + (self.oldpath or self.path)
336 return afile == 'a/' + (self.oldpath or self.path)
337
337
338 def _ispatchingb(self, bfile):
338 def _ispatchingb(self, bfile):
339 if bfile == '/dev/null':
339 if bfile == '/dev/null':
340 return self.op == 'DELETE'
340 return self.op == 'DELETE'
341 return bfile == 'b/' + self.path
341 return bfile == 'b/' + self.path
342
342
343 def ispatching(self, afile, bfile):
343 def ispatching(self, afile, bfile):
344 return self._ispatchinga(afile) and self._ispatchingb(bfile)
344 return self._ispatchinga(afile) and self._ispatchingb(bfile)
345
345
346 def __repr__(self):
346 def __repr__(self):
347 return "<patchmeta %s %r>" % (self.op, self.path)
347 return "<patchmeta %s %r>" % (self.op, self.path)
348
348
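# Illustrative sketch, not part of the original patch.py: how setmode() above
# turns a git file mode (for instance the octal mode on a "new file mode
# 100755" line) into the (islink, isexec) pair kept in patchmeta.mode.
def _example_patchmeta_mode():
    gp = patchmeta('bin/run')            # hypothetical path
    gp.setmode(int('100755', 8))         # regular file with the exec bit set
    islink, isexec = gp.mode             # -> (0, 0o100)
    return bool(islink), bool(isexec)    # (False, True)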
349 def readgitpatch(lr):
349 def readgitpatch(lr):
350 """extract git-style metadata about patches from <patchname>"""
350 """extract git-style metadata about patches from <patchname>"""
351
351
352 # Filter patch for git information
352 # Filter patch for git information
353 gp = None
353 gp = None
354 gitpatches = []
354 gitpatches = []
355 for line in lr:
355 for line in lr:
356 line = line.rstrip(' \r\n')
356 line = line.rstrip(' \r\n')
357 if line.startswith('diff --git a/'):
357 if line.startswith('diff --git a/'):
358 m = gitre.match(line)
358 m = gitre.match(line)
359 if m:
359 if m:
360 if gp:
360 if gp:
361 gitpatches.append(gp)
361 gitpatches.append(gp)
362 dst = m.group(2)
362 dst = m.group(2)
363 gp = patchmeta(dst)
363 gp = patchmeta(dst)
364 elif gp:
364 elif gp:
365 if line.startswith('--- '):
365 if line.startswith('--- '):
366 gitpatches.append(gp)
366 gitpatches.append(gp)
367 gp = None
367 gp = None
368 continue
368 continue
369 if line.startswith('rename from '):
369 if line.startswith('rename from '):
370 gp.op = 'RENAME'
370 gp.op = 'RENAME'
371 gp.oldpath = line[12:]
371 gp.oldpath = line[12:]
372 elif line.startswith('rename to '):
372 elif line.startswith('rename to '):
373 gp.path = line[10:]
373 gp.path = line[10:]
374 elif line.startswith('copy from '):
374 elif line.startswith('copy from '):
375 gp.op = 'COPY'
375 gp.op = 'COPY'
376 gp.oldpath = line[10:]
376 gp.oldpath = line[10:]
377 elif line.startswith('copy to '):
377 elif line.startswith('copy to '):
378 gp.path = line[8:]
378 gp.path = line[8:]
379 elif line.startswith('deleted file'):
379 elif line.startswith('deleted file'):
380 gp.op = 'DELETE'
380 gp.op = 'DELETE'
381 elif line.startswith('new file mode '):
381 elif line.startswith('new file mode '):
382 gp.op = 'ADD'
382 gp.op = 'ADD'
383 gp.setmode(int(line[-6:], 8))
383 gp.setmode(int(line[-6:], 8))
384 elif line.startswith('new mode '):
384 elif line.startswith('new mode '):
385 gp.setmode(int(line[-6:], 8))
385 gp.setmode(int(line[-6:], 8))
386 elif line.startswith('GIT binary patch'):
386 elif line.startswith('GIT binary patch'):
387 gp.binary = True
387 gp.binary = True
388 if gp:
388 if gp:
389 gitpatches.append(gp)
389 gitpatches.append(gp)
390
390
391 return gitpatches
391 return gitpatches
392
392
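# Illustrative usage sketch, not part of the original patch.py: a hedged
# example of collecting per-file metadata with readgitpatch() above.  'fp' is
# assumed to be an open git-style diff; linereader (defined just below) wraps
# it for line iteration.
def _example_readgitpatch_usage(fp):
    ops = []
    for gp in readgitpatch(linereader(fp)):
        # each patchmeta carries the operation plus path/oldpath/mode details
        ops.append((gp.op, gp.path, gp.oldpath, gp.mode))
    return ops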
393 class linereader(object):
393 class linereader(object):
394 # simple class to allow pushing lines back into the input stream
394 # simple class to allow pushing lines back into the input stream
395 def __init__(self, fp):
395 def __init__(self, fp):
396 self.fp = fp
396 self.fp = fp
397 self.buf = []
397 self.buf = []
398
398
399 def push(self, line):
399 def push(self, line):
400 if line is not None:
400 if line is not None:
401 self.buf.append(line)
401 self.buf.append(line)
402
402
403 def readline(self):
403 def readline(self):
404 if self.buf:
404 if self.buf:
405 l = self.buf[0]
405 l = self.buf[0]
406 del self.buf[0]
406 del self.buf[0]
407 return l
407 return l
408 return self.fp.readline()
408 return self.fp.readline()
409
409
410 def __iter__(self):
410 def __iter__(self):
411 while True:
411 while True:
412 l = self.readline()
412 l = self.readline()
413 if not l:
413 if not l:
414 break
414 break
415 yield l
415 yield l
416
416
417 class abstractbackend(object):
417 class abstractbackend(object):
418 def __init__(self, ui):
418 def __init__(self, ui):
419 self.ui = ui
419 self.ui = ui
420
420
421 def getfile(self, fname):
421 def getfile(self, fname):
422 """Return target file data and flags as a (data, (islink,
422 """Return target file data and flags as a (data, (islink,
423 isexec)) tuple. Data is None if file is missing/deleted.
423 isexec)) tuple. Data is None if file is missing/deleted.
424 """
424 """
425 raise NotImplementedError
425 raise NotImplementedError
426
426
427 def setfile(self, fname, data, mode, copysource):
427 def setfile(self, fname, data, mode, copysource):
428 """Write data to target file fname and set its mode. mode is a
428 """Write data to target file fname and set its mode. mode is a
429 (islink, isexec) tuple. If data is None, the file content should
429 (islink, isexec) tuple. If data is None, the file content should
430 be left unchanged. If the file is modified after being copied,
430 be left unchanged. If the file is modified after being copied,
431 copysource is set to the original file name.
431 copysource is set to the original file name.
432 """
432 """
433 raise NotImplementedError
433 raise NotImplementedError
434
434
435 def unlink(self, fname):
435 def unlink(self, fname):
436 """Unlink target file."""
436 """Unlink target file."""
437 raise NotImplementedError
437 raise NotImplementedError
438
438
439 def writerej(self, fname, failed, total, lines):
439 def writerej(self, fname, failed, total, lines):
440 """Write rejected lines for fname. total is the number of hunks
440 """Write rejected lines for fname. total is the number of hunks
441 which failed to apply and total the total number of hunks for this
441 which failed to apply and total the total number of hunks for this
442 files.
442 files.
443 """
443 """
444 pass
444 pass
445
445
446 def exists(self, fname):
446 def exists(self, fname):
447 raise NotImplementedError
447 raise NotImplementedError
448
448
449 class fsbackend(abstractbackend):
449 class fsbackend(abstractbackend):
450 def __init__(self, ui, basedir):
450 def __init__(self, ui, basedir):
451 super(fsbackend, self).__init__(ui)
451 super(fsbackend, self).__init__(ui)
452 self.opener = scmutil.opener(basedir)
452 self.opener = scmutil.opener(basedir)
453
453
454 def _join(self, f):
454 def _join(self, f):
455 return os.path.join(self.opener.base, f)
455 return os.path.join(self.opener.base, f)
456
456
457 def getfile(self, fname):
457 def getfile(self, fname):
458 if self.opener.islink(fname):
458 if self.opener.islink(fname):
459 return (self.opener.readlink(fname), (True, False))
459 return (self.opener.readlink(fname), (True, False))
460
460
461 isexec = False
461 isexec = False
462 try:
462 try:
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
464 except OSError as e:
464 except OSError as e:
465 if e.errno != errno.ENOENT:
465 if e.errno != errno.ENOENT:
466 raise
466 raise
467 try:
467 try:
468 return (self.opener.read(fname), (False, isexec))
468 return (self.opener.read(fname), (False, isexec))
469 except IOError as e:
469 except IOError as e:
470 if e.errno != errno.ENOENT:
470 if e.errno != errno.ENOENT:
471 raise
471 raise
472 return None, None
472 return None, None
473
473
474 def setfile(self, fname, data, mode, copysource):
474 def setfile(self, fname, data, mode, copysource):
475 islink, isexec = mode
475 islink, isexec = mode
476 if data is None:
476 if data is None:
477 self.opener.setflags(fname, islink, isexec)
477 self.opener.setflags(fname, islink, isexec)
478 return
478 return
479 if islink:
479 if islink:
480 self.opener.symlink(data, fname)
480 self.opener.symlink(data, fname)
481 else:
481 else:
482 self.opener.write(fname, data)
482 self.opener.write(fname, data)
483 if isexec:
483 if isexec:
484 self.opener.setflags(fname, False, True)
484 self.opener.setflags(fname, False, True)
485
485
486 def unlink(self, fname):
486 def unlink(self, fname):
487 self.opener.unlinkpath(fname, ignoremissing=True)
487 self.opener.unlinkpath(fname, ignoremissing=True)
488
488
489 def writerej(self, fname, failed, total, lines):
489 def writerej(self, fname, failed, total, lines):
490 fname = fname + ".rej"
490 fname = fname + ".rej"
491 self.ui.warn(
491 self.ui.warn(
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
493 (failed, total, fname))
493 (failed, total, fname))
494 fp = self.opener(fname, 'w')
494 fp = self.opener(fname, 'w')
495 fp.writelines(lines)
495 fp.writelines(lines)
496 fp.close()
496 fp.close()
497
497
498 def exists(self, fname):
498 def exists(self, fname):
499 return self.opener.lexists(fname)
499 return self.opener.lexists(fname)
500
500
501 class workingbackend(fsbackend):
501 class workingbackend(fsbackend):
502 def __init__(self, ui, repo, similarity):
502 def __init__(self, ui, repo, similarity):
503 super(workingbackend, self).__init__(ui, repo.root)
503 super(workingbackend, self).__init__(ui, repo.root)
504 self.repo = repo
504 self.repo = repo
505 self.similarity = similarity
505 self.similarity = similarity
506 self.removed = set()
506 self.removed = set()
507 self.changed = set()
507 self.changed = set()
508 self.copied = []
508 self.copied = []
509
509
510 def _checkknown(self, fname):
510 def _checkknown(self, fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
513
513
514 def setfile(self, fname, data, mode, copysource):
514 def setfile(self, fname, data, mode, copysource):
515 self._checkknown(fname)
515 self._checkknown(fname)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
517 if copysource is not None:
517 if copysource is not None:
518 self.copied.append((copysource, fname))
518 self.copied.append((copysource, fname))
519 self.changed.add(fname)
519 self.changed.add(fname)
520
520
521 def unlink(self, fname):
521 def unlink(self, fname):
522 self._checkknown(fname)
522 self._checkknown(fname)
523 super(workingbackend, self).unlink(fname)
523 super(workingbackend, self).unlink(fname)
524 self.removed.add(fname)
524 self.removed.add(fname)
525 self.changed.add(fname)
525 self.changed.add(fname)
526
526
527 def close(self):
527 def close(self):
528 wctx = self.repo[None]
528 wctx = self.repo[None]
529 changed = set(self.changed)
529 changed = set(self.changed)
530 for src, dst in self.copied:
530 for src, dst in self.copied:
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
532 if self.removed:
532 if self.removed:
533 wctx.forget(sorted(self.removed))
533 wctx.forget(sorted(self.removed))
534 for f in self.removed:
534 for f in self.removed:
535 if f not in self.repo.dirstate:
535 if f not in self.repo.dirstate:
536 # File was deleted and no longer belongs to the
536 # File was deleted and no longer belongs to the
537 # dirstate, it was probably marked added then
537 # dirstate, it was probably marked added then
538 # deleted, and should not be considered by
538 # deleted, and should not be considered by
539 # marktouched().
539 # marktouched().
540 changed.discard(f)
540 changed.discard(f)
541 if changed:
541 if changed:
542 scmutil.marktouched(self.repo, changed, self.similarity)
542 scmutil.marktouched(self.repo, changed, self.similarity)
543 return sorted(self.changed)
543 return sorted(self.changed)
544
544
545 class filestore(object):
545 class filestore(object):
546 def __init__(self, maxsize=None):
546 def __init__(self, maxsize=None):
547 self.opener = None
547 self.opener = None
548 self.files = {}
548 self.files = {}
549 self.created = 0
549 self.created = 0
550 self.maxsize = maxsize
550 self.maxsize = maxsize
551 if self.maxsize is None:
551 if self.maxsize is None:
552 self.maxsize = 4*(2**20)
552 self.maxsize = 4*(2**20)
553 self.size = 0
553 self.size = 0
554 self.data = {}
554 self.data = {}
555
555
556 def setfile(self, fname, data, mode, copied=None):
556 def setfile(self, fname, data, mode, copied=None):
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
558 self.data[fname] = (data, mode, copied)
558 self.data[fname] = (data, mode, copied)
559 self.size += len(data)
559 self.size += len(data)
560 else:
560 else:
561 if self.opener is None:
561 if self.opener is None:
562 root = tempfile.mkdtemp(prefix='hg-patch-')
562 root = tempfile.mkdtemp(prefix='hg-patch-')
563 self.opener = scmutil.opener(root)
563 self.opener = scmutil.opener(root)
564 # Avoid filename issues with these simple names
564 # Avoid filename issues with these simple names
565 fn = str(self.created)
565 fn = str(self.created)
566 self.opener.write(fn, data)
566 self.opener.write(fn, data)
567 self.created += 1
567 self.created += 1
568 self.files[fname] = (fn, mode, copied)
568 self.files[fname] = (fn, mode, copied)
569
569
570 def getfile(self, fname):
570 def getfile(self, fname):
571 if fname in self.data:
571 if fname in self.data:
572 return self.data[fname]
572 return self.data[fname]
573 if not self.opener or fname not in self.files:
573 if not self.opener or fname not in self.files:
574 return None, None, None
574 return None, None, None
575 fn, mode, copied = self.files[fname]
575 fn, mode, copied = self.files[fname]
576 return self.opener.read(fn), mode, copied
576 return self.opener.read(fn), mode, copied
577
577
578 def close(self):
578 def close(self):
579 if self.opener:
579 if self.opener:
580 shutil.rmtree(self.opener.base)
580 shutil.rmtree(self.opener.base)
581
581
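# Illustrative usage sketch, not part of the original patch.py: a hedged
# example of the filestore spill-over behaviour above.  Data is kept in
# memory until the running total would exceed maxsize, after which files are
# written under a temporary directory that close() removes.
def _example_filestore_usage():
    store = filestore(maxsize=16)
    store.setfile('small', 'data', (False, False))    # stays in self.data
    store.setfile('big', 'x' * 64, (False, False))    # spilled to disk
    data, mode, copied = store.getfile('big')
    store.close()                                      # removes the temp dir
    return data == 'x' * 64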
582 class repobackend(abstractbackend):
582 class repobackend(abstractbackend):
583 def __init__(self, ui, repo, ctx, store):
583 def __init__(self, ui, repo, ctx, store):
584 super(repobackend, self).__init__(ui)
584 super(repobackend, self).__init__(ui)
585 self.repo = repo
585 self.repo = repo
586 self.ctx = ctx
586 self.ctx = ctx
587 self.store = store
587 self.store = store
588 self.changed = set()
588 self.changed = set()
589 self.removed = set()
589 self.removed = set()
590 self.copied = {}
590 self.copied = {}
591
591
592 def _checkknown(self, fname):
592 def _checkknown(self, fname):
593 if fname not in self.ctx:
593 if fname not in self.ctx:
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
595
595
596 def getfile(self, fname):
596 def getfile(self, fname):
597 try:
597 try:
598 fctx = self.ctx[fname]
598 fctx = self.ctx[fname]
599 except error.LookupError:
599 except error.LookupError:
600 return None, None
600 return None, None
601 flags = fctx.flags()
601 flags = fctx.flags()
602 return fctx.data(), ('l' in flags, 'x' in flags)
602 return fctx.data(), ('l' in flags, 'x' in flags)
603
603
604 def setfile(self, fname, data, mode, copysource):
604 def setfile(self, fname, data, mode, copysource):
605 if copysource:
605 if copysource:
606 self._checkknown(copysource)
606 self._checkknown(copysource)
607 if data is None:
607 if data is None:
608 data = self.ctx[fname].data()
608 data = self.ctx[fname].data()
609 self.store.setfile(fname, data, mode, copysource)
609 self.store.setfile(fname, data, mode, copysource)
610 self.changed.add(fname)
610 self.changed.add(fname)
611 if copysource:
611 if copysource:
612 self.copied[fname] = copysource
612 self.copied[fname] = copysource
613
613
614 def unlink(self, fname):
614 def unlink(self, fname):
615 self._checkknown(fname)
615 self._checkknown(fname)
616 self.removed.add(fname)
616 self.removed.add(fname)
617
617
618 def exists(self, fname):
618 def exists(self, fname):
619 return fname in self.ctx
619 return fname in self.ctx
620
620
621 def close(self):
621 def close(self):
622 return self.changed | self.removed
622 return self.changed | self.removed
623
623
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
628
628
629 class patchfile(object):
629 class patchfile(object):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 self.fname = gp.path
631 self.fname = gp.path
632 self.eolmode = eolmode
632 self.eolmode = eolmode
633 self.eol = None
633 self.eol = None
634 self.backend = backend
634 self.backend = backend
635 self.ui = ui
635 self.ui = ui
636 self.lines = []
636 self.lines = []
637 self.exists = False
637 self.exists = False
638 self.missing = True
638 self.missing = True
639 self.mode = gp.mode
639 self.mode = gp.mode
640 self.copysource = gp.oldpath
640 self.copysource = gp.oldpath
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 self.remove = gp.op == 'DELETE'
642 self.remove = gp.op == 'DELETE'
643 if self.copysource is None:
643 if self.copysource is None:
644 data, mode = backend.getfile(self.fname)
644 data, mode = backend.getfile(self.fname)
645 else:
645 else:
646 data, mode = store.getfile(self.copysource)[:2]
646 data, mode = store.getfile(self.copysource)[:2]
647 if data is not None:
647 if data is not None:
648 self.exists = self.copysource is None or backend.exists(self.fname)
648 self.exists = self.copysource is None or backend.exists(self.fname)
649 self.missing = False
649 self.missing = False
650 if data:
650 if data:
651 self.lines = mdiff.splitnewlines(data)
651 self.lines = mdiff.splitnewlines(data)
652 if self.mode is None:
652 if self.mode is None:
653 self.mode = mode
653 self.mode = mode
654 if self.lines:
654 if self.lines:
655 # Normalize line endings
655 # Normalize line endings
656 if self.lines[0].endswith('\r\n'):
656 if self.lines[0].endswith('\r\n'):
657 self.eol = '\r\n'
657 self.eol = '\r\n'
658 elif self.lines[0].endswith('\n'):
658 elif self.lines[0].endswith('\n'):
659 self.eol = '\n'
659 self.eol = '\n'
660 if eolmode != 'strict':
660 if eolmode != 'strict':
661 nlines = []
661 nlines = []
662 for l in self.lines:
662 for l in self.lines:
663 if l.endswith('\r\n'):
663 if l.endswith('\r\n'):
664 l = l[:-2] + '\n'
664 l = l[:-2] + '\n'
665 nlines.append(l)
665 nlines.append(l)
666 self.lines = nlines
666 self.lines = nlines
667 else:
667 else:
668 if self.create:
668 if self.create:
669 self.missing = False
669 self.missing = False
670 if self.mode is None:
670 if self.mode is None:
671 self.mode = (False, False)
671 self.mode = (False, False)
672 if self.missing:
672 if self.missing:
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674
674
675 self.hash = {}
675 self.hash = {}
676 self.dirty = 0
676 self.dirty = 0
677 self.offset = 0
677 self.offset = 0
678 self.skew = 0
678 self.skew = 0
679 self.rej = []
679 self.rej = []
680 self.fileprinted = False
680 self.fileprinted = False
681 self.printfile(False)
681 self.printfile(False)
682 self.hunks = 0
682 self.hunks = 0
683
683
684 def writelines(self, fname, lines, mode):
684 def writelines(self, fname, lines, mode):
685 if self.eolmode == 'auto':
685 if self.eolmode == 'auto':
686 eol = self.eol
686 eol = self.eol
687 elif self.eolmode == 'crlf':
687 elif self.eolmode == 'crlf':
688 eol = '\r\n'
688 eol = '\r\n'
689 else:
689 else:
690 eol = '\n'
690 eol = '\n'
691
691
692 if self.eolmode != 'strict' and eol and eol != '\n':
692 if self.eolmode != 'strict' and eol and eol != '\n':
693 rawlines = []
693 rawlines = []
694 for l in lines:
694 for l in lines:
695 if l and l[-1] == '\n':
695 if l and l[-1] == '\n':
696 l = l[:-1] + eol
696 l = l[:-1] + eol
697 rawlines.append(l)
697 rawlines.append(l)
698 lines = rawlines
698 lines = rawlines
699
699
700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
701
701
702 def printfile(self, warn):
702 def printfile(self, warn):
703 if self.fileprinted:
703 if self.fileprinted:
704 return
704 return
705 if warn or self.ui.verbose:
705 if warn or self.ui.verbose:
706 self.fileprinted = True
706 self.fileprinted = True
707 s = _("patching file %s\n") % self.fname
707 s = _("patching file %s\n") % self.fname
708 if warn:
708 if warn:
709 self.ui.warn(s)
709 self.ui.warn(s)
710 else:
710 else:
711 self.ui.note(s)
711 self.ui.note(s)
712
712
713
713
714 def findlines(self, l, linenum):
714 def findlines(self, l, linenum):
715 # looks through the hash and finds candidate lines. The
715 # looks through the hash and finds candidate lines. The
716 # result is a list of line numbers sorted based on distance
716 # result is a list of line numbers sorted based on distance
717 # from linenum
717 # from linenum
718
718
719 cand = self.hash.get(l, [])
719 cand = self.hash.get(l, [])
720 if len(cand) > 1:
720 if len(cand) > 1:
721 # resort our list of potentials forward then back.
721 # resort our list of potentials forward then back.
722 cand.sort(key=lambda x: abs(x - linenum))
722 cand.sort(key=lambda x: abs(x - linenum))
723 return cand
723 return cand
724
724
725 def write_rej(self):
725 def write_rej(self):
726 # our rejects are a little different from patch(1). This always
726 # our rejects are a little different from patch(1). This always
727 # creates rejects in the same form as the original patch. A file
727 # creates rejects in the same form as the original patch. A file
728 # header is inserted so that you can run the reject through patch again
728 # header is inserted so that you can run the reject through patch again
729 # without having to type the filename.
729 # without having to type the filename.
730 if not self.rej:
730 if not self.rej:
731 return
731 return
732 base = os.path.basename(self.fname)
732 base = os.path.basename(self.fname)
733 lines = ["--- %s\n+++ %s\n" % (base, base)]
733 lines = ["--- %s\n+++ %s\n" % (base, base)]
734 for x in self.rej:
734 for x in self.rej:
735 for l in x.hunk:
735 for l in x.hunk:
736 lines.append(l)
736 lines.append(l)
737 if l[-1] != '\n':
737 if l[-1] != '\n':
738 lines.append("\n\ No newline at end of file\n")
738 lines.append("\n\ No newline at end of file\n")
739 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
739 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
740
740
741 def apply(self, h):
741 def apply(self, h):
742 if not h.complete():
742 if not h.complete():
743 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
743 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
744 (h.number, h.desc, len(h.a), h.lena, len(h.b),
744 (h.number, h.desc, len(h.a), h.lena, len(h.b),
745 h.lenb))
745 h.lenb))
746
746
747 self.hunks += 1
747 self.hunks += 1
748
748
749 if self.missing:
749 if self.missing:
750 self.rej.append(h)
750 self.rej.append(h)
751 return -1
751 return -1
752
752
753 if self.exists and self.create:
753 if self.exists and self.create:
754 if self.copysource:
754 if self.copysource:
755 self.ui.warn(_("cannot create %s: destination already "
755 self.ui.warn(_("cannot create %s: destination already "
756 "exists\n") % self.fname)
756 "exists\n") % self.fname)
757 else:
757 else:
758 self.ui.warn(_("file %s already exists\n") % self.fname)
758 self.ui.warn(_("file %s already exists\n") % self.fname)
759 self.rej.append(h)
759 self.rej.append(h)
760 return -1
760 return -1
761
761
762 if isinstance(h, binhunk):
762 if isinstance(h, binhunk):
763 if self.remove:
763 if self.remove:
764 self.backend.unlink(self.fname)
764 self.backend.unlink(self.fname)
765 else:
765 else:
766 l = h.new(self.lines)
766 l = h.new(self.lines)
767 self.lines[:] = l
767 self.lines[:] = l
768 self.offset += len(l)
768 self.offset += len(l)
769 self.dirty = True
769 self.dirty = True
770 return 0
770 return 0
771
771
772 horig = h
772 horig = h
773 if (self.eolmode in ('crlf', 'lf')
773 if (self.eolmode in ('crlf', 'lf')
774 or self.eolmode == 'auto' and self.eol):
774 or self.eolmode == 'auto' and self.eol):
775 # If new eols are going to be normalized, then normalize
775 # If new eols are going to be normalized, then normalize
776 # hunk data before patching. Otherwise, preserve input
776 # hunk data before patching. Otherwise, preserve input
777 # line-endings.
777 # line-endings.
778 h = h.getnormalized()
778 h = h.getnormalized()
779
779
780 # fast case first, no offsets, no fuzz
780 # fast case first, no offsets, no fuzz
781 old, oldstart, new, newstart = h.fuzzit(0, False)
781 old, oldstart, new, newstart = h.fuzzit(0, False)
782 oldstart += self.offset
782 oldstart += self.offset
783 orig_start = oldstart
783 orig_start = oldstart
784 # if there's skew we want to emit the "(offset %d lines)" even
784 # if there's skew we want to emit the "(offset %d lines)" even
785 # when the hunk cleanly applies at start + skew, so skip the
785 # when the hunk cleanly applies at start + skew, so skip the
786 # fast case code
786 # fast case code
787 if (self.skew == 0 and
787 if (self.skew == 0 and
788 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
788 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
789 if self.remove:
789 if self.remove:
790 self.backend.unlink(self.fname)
790 self.backend.unlink(self.fname)
791 else:
791 else:
792 self.lines[oldstart:oldstart + len(old)] = new
792 self.lines[oldstart:oldstart + len(old)] = new
793 self.offset += len(new) - len(old)
793 self.offset += len(new) - len(old)
794 self.dirty = True
794 self.dirty = True
795 return 0
795 return 0
796
796
797 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
797 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
798 self.hash = {}
798 self.hash = {}
799 for x, s in enumerate(self.lines):
799 for x, s in enumerate(self.lines):
800 self.hash.setdefault(s, []).append(x)
800 self.hash.setdefault(s, []).append(x)
801
801
802 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
802 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
803 for toponly in [True, False]:
803 for toponly in [True, False]:
804 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
804 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
805 oldstart = oldstart + self.offset + self.skew
805 oldstart = oldstart + self.offset + self.skew
806 oldstart = min(oldstart, len(self.lines))
806 oldstart = min(oldstart, len(self.lines))
807 if old:
807 if old:
808 cand = self.findlines(old[0][1:], oldstart)
808 cand = self.findlines(old[0][1:], oldstart)
809 else:
809 else:
810 # Only adding lines with no or fuzzed context, just
810 # Only adding lines with no or fuzzed context, just
811 # take the skew in account
811 # take the skew in account
812 cand = [oldstart]
812 cand = [oldstart]
813
813
814 for l in cand:
814 for l in cand:
815 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
815 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
816 self.lines[l : l + len(old)] = new
816 self.lines[l : l + len(old)] = new
817 self.offset += len(new) - len(old)
817 self.offset += len(new) - len(old)
818 self.skew = l - orig_start
818 self.skew = l - orig_start
819 self.dirty = True
819 self.dirty = True
820 offset = l - orig_start - fuzzlen
820 offset = l - orig_start - fuzzlen
821 if fuzzlen:
821 if fuzzlen:
822 msg = _("Hunk #%d succeeded at %d "
822 msg = _("Hunk #%d succeeded at %d "
823 "with fuzz %d "
823 "with fuzz %d "
824 "(offset %d lines).\n")
824 "(offset %d lines).\n")
825 self.printfile(True)
825 self.printfile(True)
826 self.ui.warn(msg %
826 self.ui.warn(msg %
827 (h.number, l + 1, fuzzlen, offset))
827 (h.number, l + 1, fuzzlen, offset))
828 else:
828 else:
829 msg = _("Hunk #%d succeeded at %d "
829 msg = _("Hunk #%d succeeded at %d "
830 "(offset %d lines).\n")
830 "(offset %d lines).\n")
831 self.ui.note(msg % (h.number, l + 1, offset))
831 self.ui.note(msg % (h.number, l + 1, offset))
832 return fuzzlen
832 return fuzzlen
833 self.printfile(True)
833 self.printfile(True)
834 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
834 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
835 self.rej.append(horig)
835 self.rej.append(horig)
836 return -1
836 return -1
837
837
838 def close(self):
838 def close(self):
839 if self.dirty:
839 if self.dirty:
840 self.writelines(self.fname, self.lines, self.mode)
840 self.writelines(self.fname, self.lines, self.mode)
841 self.write_rej()
841 self.write_rej()
842 return len(self.rej)
842 return len(self.rej)
843
843
844 class header(object):
844 class header(object):
845 """patch header
845 """patch header
846 """
846 """
847 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
847 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
848 diff_re = re.compile('diff -r .* (.*)$')
848 diff_re = re.compile('diff -r .* (.*)$')
849 allhunks_re = re.compile('(?:index|deleted file) ')
849 allhunks_re = re.compile('(?:index|deleted file) ')
850 pretty_re = re.compile('(?:new file|deleted file) ')
850 pretty_re = re.compile('(?:new file|deleted file) ')
851 special_re = re.compile('(?:index|deleted|copy|rename) ')
851 special_re = re.compile('(?:index|deleted|copy|rename) ')
852 newfile_re = re.compile('(?:new file)')
852 newfile_re = re.compile('(?:new file)')
853
853
854 def __init__(self, header):
854 def __init__(self, header):
855 self.header = header
855 self.header = header
856 self.hunks = []
856 self.hunks = []
857
857
858 def binary(self):
858 def binary(self):
859 return any(h.startswith('index ') for h in self.header)
859 return any(h.startswith('index ') for h in self.header)
860
860
861 def pretty(self, fp):
861 def pretty(self, fp):
862 for h in self.header:
862 for h in self.header:
863 if h.startswith('index '):
863 if h.startswith('index '):
864 fp.write(_('this modifies a binary file (all or nothing)\n'))
864 fp.write(_('this modifies a binary file (all or nothing)\n'))
865 break
865 break
866 if self.pretty_re.match(h):
866 if self.pretty_re.match(h):
867 fp.write(h)
867 fp.write(h)
868 if self.binary():
868 if self.binary():
869 fp.write(_('this is a binary file\n'))
869 fp.write(_('this is a binary file\n'))
870 break
870 break
871 if h.startswith('---'):
871 if h.startswith('---'):
872 fp.write(_('%d hunks, %d lines changed\n') %
872 fp.write(_('%d hunks, %d lines changed\n') %
873 (len(self.hunks),
873 (len(self.hunks),
874 sum([max(h.added, h.removed) for h in self.hunks])))
874 sum([max(h.added, h.removed) for h in self.hunks])))
875 break
875 break
876 fp.write(h)
876 fp.write(h)
877
877
878 def write(self, fp):
878 def write(self, fp):
879 fp.write(''.join(self.header))
879 fp.write(''.join(self.header))
880
880
881 def allhunks(self):
881 def allhunks(self):
882 return any(self.allhunks_re.match(h) for h in self.header)
882 return any(self.allhunks_re.match(h) for h in self.header)
883
883
884 def files(self):
884 def files(self):
885 match = self.diffgit_re.match(self.header[0])
885 match = self.diffgit_re.match(self.header[0])
886 if match:
886 if match:
887 fromfile, tofile = match.groups()
887 fromfile, tofile = match.groups()
888 if fromfile == tofile:
888 if fromfile == tofile:
889 return [fromfile]
889 return [fromfile]
890 return [fromfile, tofile]
890 return [fromfile, tofile]
891 else:
891 else:
892 return self.diff_re.match(self.header[0]).groups()
892 return self.diff_re.match(self.header[0]).groups()
893
893
894 def filename(self):
894 def filename(self):
895 return self.files()[-1]
895 return self.files()[-1]
896
896
897 def __repr__(self):
897 def __repr__(self):
898 return '<header %s>' % (' '.join(map(repr, self.files())))
898 return '<header %s>' % (' '.join(map(repr, self.files())))
899
899
900 def isnewfile(self):
900 def isnewfile(self):
901 return any(self.newfile_re.match(h) for h in self.header)
901 return any(self.newfile_re.match(h) for h in self.header)
902
902
903 def special(self):
903 def special(self):
904 # Special files are shown only at the header level and not at the hunk
904 # Special files are shown only at the header level and not at the hunk
905 # level for example a file that has been deleted is a special file.
905 # level for example a file that has been deleted is a special file.
906 # The user cannot change the content of the operation, in the case of
906 # The user cannot change the content of the operation, in the case of
907 # the deleted file he has to take the deletion or not take it, he
907 # the deleted file he has to take the deletion or not take it, he
908 # cannot take some of it.
908 # cannot take some of it.
909 # Newly added files are special if they are empty, they are not special
909 # Newly added files are special if they are empty, they are not special
910 # if they have some content as we want to be able to change it
910 # if they have some content as we want to be able to change it
911 nocontent = len(self.header) == 2
911 nocontent = len(self.header) == 2
912 emptynewfile = self.isnewfile() and nocontent
912 emptynewfile = self.isnewfile() and nocontent
913 return emptynewfile or \
913 return emptynewfile or \
914 any(self.special_re.match(h) for h in self.header)
914 any(self.special_re.match(h) for h in self.header)
915
915
916 class recordhunk(object):
916 class recordhunk(object):
917 """patch hunk
917 """patch hunk
918
918
919 XXX shouldn't we merge this with the other hunk class?
919 XXX shouldn't we merge this with the other hunk class?
920 """
920 """
921 maxcontext = 3
921 maxcontext = 3
922
922
923 def __init__(self, header, fromline, toline, proc, before, hunk, after):
923 def __init__(self, header, fromline, toline, proc, before, hunk, after):
924 def trimcontext(number, lines):
924 def trimcontext(number, lines):
925 delta = len(lines) - self.maxcontext
925 delta = len(lines) - self.maxcontext
926 if False and delta > 0:
926 if False and delta > 0:
927 return number + delta, lines[:self.maxcontext]
927 return number + delta, lines[:self.maxcontext]
928 return number, lines
928 return number, lines
929
929
930 self.header = header
930 self.header = header
931 self.fromline, self.before = trimcontext(fromline, before)
931 self.fromline, self.before = trimcontext(fromline, before)
932 self.toline, self.after = trimcontext(toline, after)
932 self.toline, self.after = trimcontext(toline, after)
933 self.proc = proc
933 self.proc = proc
934 self.hunk = hunk
934 self.hunk = hunk
935 self.added, self.removed = self.countchanges(self.hunk)
935 self.added, self.removed = self.countchanges(self.hunk)
936
936
937 def __eq__(self, v):
937 def __eq__(self, v):
938 if not isinstance(v, recordhunk):
938 if not isinstance(v, recordhunk):
939 return False
939 return False
940
940
941 return ((v.hunk == self.hunk) and
941 return ((v.hunk == self.hunk) and
942 (v.proc == self.proc) and
942 (v.proc == self.proc) and
943 (self.fromline == v.fromline) and
943 (self.fromline == v.fromline) and
944 (self.header.files() == v.header.files()))
944 (self.header.files() == v.header.files()))
945
945
946 def __hash__(self):
946 def __hash__(self):
947 return hash((tuple(self.hunk),
947 return hash((tuple(self.hunk),
948 tuple(self.header.files()),
948 tuple(self.header.files()),
949 self.fromline,
949 self.fromline,
950 self.proc))
950 self.proc))
951
951
952 def countchanges(self, hunk):
952 def countchanges(self, hunk):
953 """hunk -> (n+,n-)"""
953 """hunk -> (n+,n-)"""
954 add = len([h for h in hunk if h[0] == '+'])
954 add = len([h for h in hunk if h[0] == '+'])
955 rem = len([h for h in hunk if h[0] == '-'])
955 rem = len([h for h in hunk if h[0] == '-'])
956 return add, rem
956 return add, rem
957
957
958 def write(self, fp):
958 def write(self, fp):
959 delta = len(self.before) + len(self.after)
959 delta = len(self.before) + len(self.after)
960 if self.after and self.after[-1] == '\\ No newline at end of file\n':
960 if self.after and self.after[-1] == '\\ No newline at end of file\n':
961 delta -= 1
961 delta -= 1
962 fromlen = delta + self.removed
962 fromlen = delta + self.removed
963 tolen = delta + self.added
963 tolen = delta + self.added
964 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
964 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
965 (self.fromline, fromlen, self.toline, tolen,
965 (self.fromline, fromlen, self.toline, tolen,
966 self.proc and (' ' + self.proc)))
966 self.proc and (' ' + self.proc)))
967 fp.write(''.join(self.before + self.hunk + self.after))
967 fp.write(''.join(self.before + self.hunk + self.after))
968
968
969 pretty = write
969 pretty = write
970
970
971 def filename(self):
971 def filename(self):
972 return self.header.filename()
972 return self.header.filename()
973
973
974 def __repr__(self):
974 def __repr__(self):
975 return '<hunk %r@%d>' % (self.filename(), self.fromline)
975 return '<hunk %r@%d>' % (self.filename(), self.fromline)

def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], []), {})
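
# Illustrative note (not part of the original module): filterpatch() returns a
# pair (chunks, extra) where 'chunks' is a flat list interleaving each header
# with the hunks the user accepted for that file.  A caller sketch, assuming
# 'headers' came from parsepatch():
#
#     chunks = filterpatch(ui, headers)[0]
#     fp = cStringIO.StringIO()
#     for c in chunks:
#         c.write(fp)   # re-serialize only the accepted changes
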
class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
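
# Illustrative note (not part of the original module): with a fuzz factor of 1
# and toponly False, _fuzzit() may drop one leading and one trailing context
# line from the hunk body, and fuzzit() shifts the reported start lines by the
# number of lines trimmed from the top so the shortened hunk still lines up
# when it is matched against the target file.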

class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]
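
# Illustrative example (not part of the original module):
# parsefilename('--- a/foo.c\t2016-01-01 00:00:00\n') yields 'a/foo.c';
# everything after the first tab (or, failing that, the first space) is
# treated as timestamp noise and dropped.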

def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
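
# Usage sketch (illustrative, not part of the original module): parsepatch()
# takes the raw diff text and returns header objects whose .hunks lists hold
# recordhunk instances, e.g.:
#
#     headers = parsepatch(open('change.diff').read())
#     for h in headers:
#         print h.filename(), len(h.hunks)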

def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
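
# Illustrative note (not part of the original module): a file creation shows
# up here as afile == /dev/null with an empty old range (-0,0), and a deletion
# as bfile == /dev/null with an empty new range (+0,0); anything else is
# treated as an in-place modification of whichever side actually exists in the
# backend.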

def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
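
# Illustrative event stream (assumed example, not from the original source):
# for a minimal git-style diff
#
#     diff --git a/f b/f
#     --- a/f
#     +++ b/f
#     @@ -1,2 +1,2 @@
#      keep
#     -old
#     +new
#
# scanpatch() would yield roughly:
#     ('file', [...header lines...]), ('range', ('1', '2', '1', '2', '')),
#     ('context', [' keep\n']), ('hunk', ['-old\n', '+new\n'])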

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
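
# Illustrative note (not part of the original module): by pre-scanning the
# whole git patch here, iterhunks() can announce every copy/rename up front
# (the 'git' event below), so applydiff() can snapshot the source file before
# any hunk modifies it and materialize each destination from the original
# content.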

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
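
# Illustrative note (not part of the original module): consumers drive this
# generator in lockstep -- _applydiff() below opens a new target on each
# 'file' event and feeds every following 'hunk' event to that file's patcher.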

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
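
# Worked example (illustrative, not part of the original module): the delta
#
#     binchunk = '\x05\x03\x91\x01\x02\x01Z'
#
# decodes as: source length 5, target length 3, then opcode 0x91 (a copy: one
# offset byte 0x01 and one size byte 0x02, i.e. data[1:3]), then opcode 0x01
# (insert the next 1 literal byte, 'Z').  So
#
#     applybindelta(binchunk, 'abcde') == 'bcZ'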

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2038
2038
2039 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2039 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2040 eolmode='strict'):
2040 eolmode='strict'):
2041 if files is None:
2041 if files is None:
2042 files = set()
2042 files = set()
2043 if eolmode is None:
2043 if eolmode is None:
2044 eolmode = ui.config('patch', 'eol', 'strict')
2044 eolmode = ui.config('patch', 'eol', 'strict')
2045 if eolmode.lower() not in eolmodes:
2045 if eolmode.lower() not in eolmodes:
2046 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2046 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2047 eolmode = eolmode.lower()
2047 eolmode = eolmode.lower()
2048
2048
2049 store = filestore()
2049 store = filestore()
2050 try:
2050 try:
2051 fp = open(patchobj, 'rb')
2051 fp = open(patchobj, 'rb')
2052 except TypeError:
2052 except TypeError:
2053 fp = patchobj
2053 fp = patchobj
2054 try:
2054 try:
2055 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2055 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2056 eolmode=eolmode)
2056 eolmode=eolmode)
2057 finally:
2057 finally:
2058 if fp != patchobj:
2058 if fp != patchobj:
2059 fp.close()
2059 fp.close()
2060 files.update(backend.close())
2060 files.update(backend.close())
2061 store.close()
2061 store.close()
2062 if ret < 0:
2062 if ret < 0:
2063 raise PatchError(_('patch failed to apply'))
2063 raise PatchError(_('patch failed to apply'))
2064 return ret > 0
2064 return ret > 0
2065
2065
2066 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2066 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2067 eolmode='strict', similarity=0):
2067 eolmode='strict', similarity=0):
2068 """use builtin patch to apply <patchobj> to the working directory.
2068 """use builtin patch to apply <patchobj> to the working directory.
2069 returns whether patch was applied with fuzz factor."""
2069 returns whether patch was applied with fuzz factor."""
2070 backend = workingbackend(ui, repo, similarity)
2070 backend = workingbackend(ui, repo, similarity)
2071 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2071 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2072
2072
2073 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2073 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2074 eolmode='strict'):
2074 eolmode='strict'):
2075 backend = repobackend(ui, repo, ctx, store)
2075 backend = repobackend(ui, repo, ctx, store)
2076 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2076 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077
2077
2078 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2078 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2079 similarity=0):
2079 similarity=0):
2080 """Apply <patchname> to the working directory.
2080 """Apply <patchname> to the working directory.
2081
2081
2082 'eolmode' specifies how end of lines should be handled. It can be:
2082 'eolmode' specifies how end of lines should be handled. It can be:
2083 - 'strict': inputs are read in binary mode, EOLs are preserved
2083 - 'strict': inputs are read in binary mode, EOLs are preserved
2084 - 'crlf': EOLs are ignored when patching and reset to CRLF
2084 - 'crlf': EOLs are ignored when patching and reset to CRLF
2085 - 'lf': EOLs are ignored when patching and reset to LF
2085 - 'lf': EOLs are ignored when patching and reset to LF
2086 - None: get it from user settings, default to 'strict'
2086 - None: get it from user settings, default to 'strict'
2087 'eolmode' is ignored when using an external patcher program.
2087 'eolmode' is ignored when using an external patcher program.
2088
2088
2089 Returns whether patch was applied with fuzz factor.
2089 Returns whether patch was applied with fuzz factor.
2090 """
2090 """
2091 patcher = ui.config('ui', 'patch')
2091 patcher = ui.config('ui', 'patch')
2092 if files is None:
2092 if files is None:
2093 files = set()
2093 files = set()
2094 if patcher:
2094 if patcher:
2095 return _externalpatch(ui, repo, patcher, patchname, strip,
2095 return _externalpatch(ui, repo, patcher, patchname, strip,
2096 files, similarity)
2096 files, similarity)
2097 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2097 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2098 similarity)
2098 similarity)
2099
2099
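Editorial aside, not part of patch.py: the eolmode values documented in
patch() above control how line endings are treated while hunks are matched
and how they are written back. A hedged usage sketch (ui, repo and the patch
file name are placeholders):

    # apply a patch, ignoring EOLs during matching and rewriting them as LF
    fuzz = patch(ui, repo, 'fix.patch', strip=1, eolmode='lf')
    if fuzz:
        ui.warn('patch applied with fuzz\n')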
2100 def changedfiles(ui, repo, patchpath, strip=1):
2100 def changedfiles(ui, repo, patchpath, strip=1):
2101 backend = fsbackend(ui, repo.root)
2101 backend = fsbackend(ui, repo.root)
2102 fp = open(patchpath, 'rb')
2102 with open(patchpath, 'rb') as fp:
2103 try:
2104 changed = set()
2103 changed = set()
2105 for state, values in iterhunks(fp):
2104 for state, values in iterhunks(fp):
2106 if state == 'file':
2105 if state == 'file':
2107 afile, bfile, first_hunk, gp = values
2106 afile, bfile, first_hunk, gp = values
2108 if gp:
2107 if gp:
2109 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2108 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2110 if gp.oldpath:
2109 if gp.oldpath:
2111 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2110 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2112 else:
2111 else:
2113 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2112 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2114 '')
2113 '')
2115 changed.add(gp.path)
2114 changed.add(gp.path)
2116 if gp.op == 'RENAME':
2115 if gp.op == 'RENAME':
2117 changed.add(gp.oldpath)
2116 changed.add(gp.oldpath)
2118 elif state not in ('hunk', 'git'):
2117 elif state not in ('hunk', 'git'):
2119 raise error.Abort(_('unsupported parser state: %s') % state)
2118 raise error.Abort(_('unsupported parser state: %s') % state)
2120 return changed
2119 return changed
2121 finally:
2122 fp.close()
2123
2120
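Editorial aside, not part of patch.py: the hunk above replaces a manual
try/finally around open() with a with statement. A minimal sketch of the
equivalence, using a hypothetical parse() helper in place of the iterhunks()
loop:

    # before: the file must be closed by hand, even when parse() raises
    fp = open(patchpath, 'rb')
    try:
        result = parse(fp)
    finally:
        fp.close()

    # after: the context manager closes fp on normal exit and on error alike
    with open(patchpath, 'rb') as fp:
        result = parse(fp)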
2124 class GitDiffRequired(Exception):
2121 class GitDiffRequired(Exception):
2125 pass
2122 pass
2126
2123
2127 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2124 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2128 '''return diffopts with all features supported and parsed'''
2125 '''return diffopts with all features supported and parsed'''
2129 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2126 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2130 git=True, whitespace=True, formatchanging=True)
2127 git=True, whitespace=True, formatchanging=True)
2131
2128
2132 diffopts = diffallopts
2129 diffopts = diffallopts
2133
2130
2134 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2131 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2135 whitespace=False, formatchanging=False):
2132 whitespace=False, formatchanging=False):
2136 '''return diffopts with only opted-in features parsed
2133 '''return diffopts with only opted-in features parsed
2137
2134
2138 Features:
2135 Features:
2139 - git: git-style diffs
2136 - git: git-style diffs
2140 - whitespace: whitespace options like ignoreblanklines and ignorews
2137 - whitespace: whitespace options like ignoreblanklines and ignorews
2141 - formatchanging: options that will likely break or cause correctness issues
2138 - formatchanging: options that will likely break or cause correctness issues
2142 with most diff parsers
2139 with most diff parsers
2143 '''
2140 '''
2144 def get(key, name=None, getter=ui.configbool, forceplain=None):
2141 def get(key, name=None, getter=ui.configbool, forceplain=None):
2145 if opts:
2142 if opts:
2146 v = opts.get(key)
2143 v = opts.get(key)
2147 if v:
2144 if v:
2148 return v
2145 return v
2149 if forceplain is not None and ui.plain():
2146 if forceplain is not None and ui.plain():
2150 return forceplain
2147 return forceplain
2151 return getter(section, name or key, None, untrusted=untrusted)
2148 return getter(section, name or key, None, untrusted=untrusted)
2152
2149
2153 # core options, expected to be understood by every diff parser
2150 # core options, expected to be understood by every diff parser
2154 buildopts = {
2151 buildopts = {
2155 'nodates': get('nodates'),
2152 'nodates': get('nodates'),
2156 'showfunc': get('show_function', 'showfunc'),
2153 'showfunc': get('show_function', 'showfunc'),
2157 'context': get('unified', getter=ui.config),
2154 'context': get('unified', getter=ui.config),
2158 }
2155 }
2159
2156
2160 if git:
2157 if git:
2161 buildopts['git'] = get('git')
2158 buildopts['git'] = get('git')
2162 if whitespace:
2159 if whitespace:
2163 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2160 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2164 buildopts['ignorewsamount'] = get('ignore_space_change',
2161 buildopts['ignorewsamount'] = get('ignore_space_change',
2165 'ignorewsamount')
2162 'ignorewsamount')
2166 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2163 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2167 'ignoreblanklines')
2164 'ignoreblanklines')
2168 if formatchanging:
2165 if formatchanging:
2169 buildopts['text'] = opts and opts.get('text')
2166 buildopts['text'] = opts and opts.get('text')
2170 buildopts['nobinary'] = get('nobinary', forceplain=False)
2167 buildopts['nobinary'] = get('nobinary', forceplain=False)
2171 buildopts['noprefix'] = get('noprefix', forceplain=False)
2168 buildopts['noprefix'] = get('noprefix', forceplain=False)
2172
2169
2173 return mdiff.diffopts(**buildopts)
2170 return mdiff.diffopts(**buildopts)
2174
2171
2175 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2172 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2176 losedatafn=None, prefix='', relroot=''):
2173 losedatafn=None, prefix='', relroot=''):
2177 '''yields diff of changes to files between two nodes, or node and
2174 '''yields diff of changes to files between two nodes, or node and
2178 working directory.
2175 working directory.
2179
2176
2180 if node1 is None, use first dirstate parent instead.
2177 if node1 is None, use first dirstate parent instead.
2181 if node2 is None, compare node1 with working directory.
2178 if node2 is None, compare node1 with working directory.
2182
2179
2183 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2180 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2184 every time some change cannot be represented with the current
2181 every time some change cannot be represented with the current
2185 patch format. Return False to upgrade to git patch format, True to
2182 patch format. Return False to upgrade to git patch format, True to
2186 accept the loss or raise an exception to abort the diff. It is
2183 accept the loss or raise an exception to abort the diff. It is
2187 called with the name of current file being diffed as 'fn'. If set
2184 called with the name of current file being diffed as 'fn'. If set
2188 to None, patches will always be upgraded to git format when
2185 to None, patches will always be upgraded to git format when
2189 necessary.
2186 necessary.
2190
2187
2191 prefix is a filename prefix that is prepended to all filenames on
2188 prefix is a filename prefix that is prepended to all filenames on
2192 display (used for subrepos).
2189 display (used for subrepos).
2193
2190
2194 relroot, if not empty, must be normalized with a trailing /. Any match
2191 relroot, if not empty, must be normalized with a trailing /. Any match
2195 patterns that fall outside it will be ignored.'''
2192 patterns that fall outside it will be ignored.'''
2196
2193
2197 if opts is None:
2194 if opts is None:
2198 opts = mdiff.defaultopts
2195 opts = mdiff.defaultopts
2199
2196
2200 if not node1 and not node2:
2197 if not node1 and not node2:
2201 node1 = repo.dirstate.p1()
2198 node1 = repo.dirstate.p1()
2202
2199
2203 def lrugetfilectx():
2200 def lrugetfilectx():
2204 cache = {}
2201 cache = {}
2205 order = collections.deque()
2202 order = collections.deque()
2206 def getfilectx(f, ctx):
2203 def getfilectx(f, ctx):
2207 fctx = ctx.filectx(f, filelog=cache.get(f))
2204 fctx = ctx.filectx(f, filelog=cache.get(f))
2208 if f not in cache:
2205 if f not in cache:
2209 if len(cache) > 20:
2206 if len(cache) > 20:
2210 del cache[order.popleft()]
2207 del cache[order.popleft()]
2211 cache[f] = fctx.filelog()
2208 cache[f] = fctx.filelog()
2212 else:
2209 else:
2213 order.remove(f)
2210 order.remove(f)
2214 order.append(f)
2211 order.append(f)
2215 return fctx
2212 return fctx
2216 return getfilectx
2213 return getfilectx
2217 getfilectx = lrugetfilectx()
2214 getfilectx = lrugetfilectx()
2218
2215
2219 ctx1 = repo[node1]
2216 ctx1 = repo[node1]
2220 ctx2 = repo[node2]
2217 ctx2 = repo[node2]
2221
2218
2222 relfiltered = False
2219 relfiltered = False
2223 if relroot != '' and match.always():
2220 if relroot != '' and match.always():
2224 # as a special case, create a new matcher with just the relroot
2221 # as a special case, create a new matcher with just the relroot
2225 pats = [relroot]
2222 pats = [relroot]
2226 match = scmutil.match(ctx2, pats, default='path')
2223 match = scmutil.match(ctx2, pats, default='path')
2227 relfiltered = True
2224 relfiltered = True
2228
2225
2229 if not changes:
2226 if not changes:
2230 changes = repo.status(ctx1, ctx2, match=match)
2227 changes = repo.status(ctx1, ctx2, match=match)
2231 modified, added, removed = changes[:3]
2228 modified, added, removed = changes[:3]
2232
2229
2233 if not modified and not added and not removed:
2230 if not modified and not added and not removed:
2234 return []
2231 return []
2235
2232
2236 if repo.ui.debugflag:
2233 if repo.ui.debugflag:
2237 hexfunc = hex
2234 hexfunc = hex
2238 else:
2235 else:
2239 hexfunc = short
2236 hexfunc = short
2240 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2237 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2241
2238
2242 copy = {}
2239 copy = {}
2243 if opts.git or opts.upgrade:
2240 if opts.git or opts.upgrade:
2244 copy = copies.pathcopies(ctx1, ctx2, match=match)
2241 copy = copies.pathcopies(ctx1, ctx2, match=match)
2245
2242
2246 if relroot is not None:
2243 if relroot is not None:
2247 if not relfiltered:
2244 if not relfiltered:
2248 # XXX this would ideally be done in the matcher, but that is
2245 # XXX this would ideally be done in the matcher, but that is
2249 # generally meant to 'or' patterns, not 'and' them. In this case we
2246 # generally meant to 'or' patterns, not 'and' them. In this case we
2250 # need to 'and' all the patterns from the matcher with relroot.
2247 # need to 'and' all the patterns from the matcher with relroot.
2251 def filterrel(l):
2248 def filterrel(l):
2252 return [f for f in l if f.startswith(relroot)]
2249 return [f for f in l if f.startswith(relroot)]
2253 modified = filterrel(modified)
2250 modified = filterrel(modified)
2254 added = filterrel(added)
2251 added = filterrel(added)
2255 removed = filterrel(removed)
2252 removed = filterrel(removed)
2256 relfiltered = True
2253 relfiltered = True
2257 # filter out copies where either side isn't inside the relative root
2254 # filter out copies where either side isn't inside the relative root
2258 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2255 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2259 if dst.startswith(relroot)
2256 if dst.startswith(relroot)
2260 and src.startswith(relroot)))
2257 and src.startswith(relroot)))
2261
2258
2262 def difffn(opts, losedata):
2259 def difffn(opts, losedata):
2263 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2260 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2264 copy, getfilectx, opts, losedata, prefix, relroot)
2261 copy, getfilectx, opts, losedata, prefix, relroot)
2265 if opts.upgrade and not opts.git:
2262 if opts.upgrade and not opts.git:
2266 try:
2263 try:
2267 def losedata(fn):
2264 def losedata(fn):
2268 if not losedatafn or not losedatafn(fn=fn):
2265 if not losedatafn or not losedatafn(fn=fn):
2269 raise GitDiffRequired
2266 raise GitDiffRequired
2270 # Buffer the whole output until we are sure it can be generated
2267 # Buffer the whole output until we are sure it can be generated
2271 return list(difffn(opts.copy(git=False), losedata))
2268 return list(difffn(opts.copy(git=False), losedata))
2272 except GitDiffRequired:
2269 except GitDiffRequired:
2273 return difffn(opts.copy(git=True), None)
2270 return difffn(opts.copy(git=True), None)
2274 else:
2271 else:
2275 return difffn(opts, None)
2272 return difffn(opts, None)
2276
2273
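Editorial aside, not part of patch.py: diff() above is a generator yielding
diff text in blocks. A hedged usage sketch (repo, rev1 and rev2 are
placeholders):

    # stream a git-style diff between two revisions to the ui
    opts = diffallopts(repo.ui, {'git': True})
    for chunk in diff(repo, node1=rev1, node2=rev2, opts=opts):
        repo.ui.write(chunk)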
2277 def difflabel(func, *args, **kw):
2274 def difflabel(func, *args, **kw):
2278 '''yields 2-tuples of (output, label) based on the output of func()'''
2275 '''yields 2-tuples of (output, label) based on the output of func()'''
2279 headprefixes = [('diff', 'diff.diffline'),
2276 headprefixes = [('diff', 'diff.diffline'),
2280 ('copy', 'diff.extended'),
2277 ('copy', 'diff.extended'),
2281 ('rename', 'diff.extended'),
2278 ('rename', 'diff.extended'),
2282 ('old', 'diff.extended'),
2279 ('old', 'diff.extended'),
2283 ('new', 'diff.extended'),
2280 ('new', 'diff.extended'),
2284 ('deleted', 'diff.extended'),
2281 ('deleted', 'diff.extended'),
2285 ('---', 'diff.file_a'),
2282 ('---', 'diff.file_a'),
2286 ('+++', 'diff.file_b')]
2283 ('+++', 'diff.file_b')]
2287 textprefixes = [('@', 'diff.hunk'),
2284 textprefixes = [('@', 'diff.hunk'),
2288 ('-', 'diff.deleted'),
2285 ('-', 'diff.deleted'),
2289 ('+', 'diff.inserted')]
2286 ('+', 'diff.inserted')]
2290 head = False
2287 head = False
2291 for chunk in func(*args, **kw):
2288 for chunk in func(*args, **kw):
2292 lines = chunk.split('\n')
2289 lines = chunk.split('\n')
2293 for i, line in enumerate(lines):
2290 for i, line in enumerate(lines):
2294 if i != 0:
2291 if i != 0:
2295 yield ('\n', '')
2292 yield ('\n', '')
2296 if head:
2293 if head:
2297 if line.startswith('@'):
2294 if line.startswith('@'):
2298 head = False
2295 head = False
2299 else:
2296 else:
2300 if line and line[0] not in ' +-@\\':
2297 if line and line[0] not in ' +-@\\':
2301 head = True
2298 head = True
2302 stripline = line
2299 stripline = line
2303 diffline = False
2300 diffline = False
2304 if not head and line and line[0] in '+-':
2301 if not head and line and line[0] in '+-':
2305 # highlight tabs and trailing whitespace, but only in
2302 # highlight tabs and trailing whitespace, but only in
2306 # changed lines
2303 # changed lines
2307 stripline = line.rstrip()
2304 stripline = line.rstrip()
2308 diffline = True
2305 diffline = True
2309
2306
2310 prefixes = textprefixes
2307 prefixes = textprefixes
2311 if head:
2308 if head:
2312 prefixes = headprefixes
2309 prefixes = headprefixes
2313 for prefix, label in prefixes:
2310 for prefix, label in prefixes:
2314 if stripline.startswith(prefix):
2311 if stripline.startswith(prefix):
2315 if diffline:
2312 if diffline:
2316 for token in tabsplitter.findall(stripline):
2313 for token in tabsplitter.findall(stripline):
2317 if '\t' == token[0]:
2314 if '\t' == token[0]:
2318 yield (token, 'diff.tab')
2315 yield (token, 'diff.tab')
2319 else:
2316 else:
2320 yield (token, label)
2317 yield (token, label)
2321 else:
2318 else:
2322 yield (stripline, label)
2319 yield (stripline, label)
2323 break
2320 break
2324 else:
2321 else:
2325 yield (line, '')
2322 yield (line, '')
2326 if line != stripline:
2323 if line != stripline:
2327 yield (line[len(stripline):], 'diff.trailingwhitespace')
2324 yield (line[len(stripline):], 'diff.trailingwhitespace')
2328
2325
2329 def diffui(*args, **kw):
2326 def diffui(*args, **kw):
2330 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2327 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2331 return difflabel(diff, *args, **kw)
2328 return difflabel(diff, *args, **kw)
2332
2329
2333 def _filepairs(ctx1, modified, added, removed, copy, opts):
2330 def _filepairs(ctx1, modified, added, removed, copy, opts):
2334 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2331 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2335 before and f2 is the name after. For added files, f1 will be None,
2332 before and f2 is the name after. For added files, f1 will be None,
2336 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2333 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2337 or 'rename' (the latter two only if opts.git is set).'''
2334 or 'rename' (the latter two only if opts.git is set).'''
2338 gone = set()
2335 gone = set()
2339
2336
2340 copyto = dict([(v, k) for k, v in copy.items()])
2337 copyto = dict([(v, k) for k, v in copy.items()])
2341
2338
2342 addedset, removedset = set(added), set(removed)
2339 addedset, removedset = set(added), set(removed)
2343 # Fix up added, since merged-in additions appear as
2340 # Fix up added, since merged-in additions appear as
2344 # modifications during merges
2341 # modifications during merges
2345 for f in modified:
2342 for f in modified:
2346 if f not in ctx1:
2343 if f not in ctx1:
2347 addedset.add(f)
2344 addedset.add(f)
2348
2345
2349 for f in sorted(modified + added + removed):
2346 for f in sorted(modified + added + removed):
2350 copyop = None
2347 copyop = None
2351 f1, f2 = f, f
2348 f1, f2 = f, f
2352 if f in addedset:
2349 if f in addedset:
2353 f1 = None
2350 f1 = None
2354 if f in copy:
2351 if f in copy:
2355 if opts.git:
2352 if opts.git:
2356 f1 = copy[f]
2353 f1 = copy[f]
2357 if f1 in removedset and f1 not in gone:
2354 if f1 in removedset and f1 not in gone:
2358 copyop = 'rename'
2355 copyop = 'rename'
2359 gone.add(f1)
2356 gone.add(f1)
2360 else:
2357 else:
2361 copyop = 'copy'
2358 copyop = 'copy'
2362 elif f in removedset:
2359 elif f in removedset:
2363 f2 = None
2360 f2 = None
2364 if opts.git:
2361 if opts.git:
2365 # have we already reported a copy above?
2362 # have we already reported a copy above?
2366 if (f in copyto and copyto[f] in addedset
2363 if (f in copyto and copyto[f] in addedset
2367 and copy[copyto[f]] == f):
2364 and copy[copyto[f]] == f):
2368 continue
2365 continue
2369 yield f1, f2, copyop
2366 yield f1, f2, copyop
2370
2367
2371 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2368 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2372 copy, getfilectx, opts, losedatafn, prefix, relroot):
2369 copy, getfilectx, opts, losedatafn, prefix, relroot):
2373 '''given input data, generate a diff and yield it in blocks
2370 '''given input data, generate a diff and yield it in blocks
2374
2371
2375 If generating a diff would lose data like flags or binary data and
2372 If generating a diff would lose data like flags or binary data and
2376 losedatafn is not None, it will be called.
2373 losedatafn is not None, it will be called.
2377
2374
2378 relroot is removed and prefix is added to every path in the diff output.
2375 relroot is removed and prefix is added to every path in the diff output.
2379
2376
2380 If relroot is not empty, this function expects every path in modified,
2377 If relroot is not empty, this function expects every path in modified,
2381 added, removed and copy to start with it.'''
2378 added, removed and copy to start with it.'''
2382
2379
2383 def gitindex(text):
2380 def gitindex(text):
2384 if not text:
2381 if not text:
2385 text = ""
2382 text = ""
2386 l = len(text)
2383 l = len(text)
2387 s = util.sha1('blob %d\0' % l)
2384 s = util.sha1('blob %d\0' % l)
2388 s.update(text)
2385 s.update(text)
2389 return s.hexdigest()
2386 return s.hexdigest()
2390
2387
2391 if opts.noprefix:
2388 if opts.noprefix:
2392 aprefix = bprefix = ''
2389 aprefix = bprefix = ''
2393 else:
2390 else:
2394 aprefix = 'a/'
2391 aprefix = 'a/'
2395 bprefix = 'b/'
2392 bprefix = 'b/'
2396
2393
2397 def diffline(f, revs):
2394 def diffline(f, revs):
2398 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2395 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2399 return 'diff %s %s' % (revinfo, f)
2396 return 'diff %s %s' % (revinfo, f)
2400
2397
2401 date1 = util.datestr(ctx1.date())
2398 date1 = util.datestr(ctx1.date())
2402 date2 = util.datestr(ctx2.date())
2399 date2 = util.datestr(ctx2.date())
2403
2400
2404 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2401 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2405
2402
2406 if relroot != '' and (repo.ui.configbool('devel', 'all')
2403 if relroot != '' and (repo.ui.configbool('devel', 'all')
2407 or repo.ui.configbool('devel', 'check-relroot')):
2404 or repo.ui.configbool('devel', 'check-relroot')):
2408 for f in modified + added + removed + copy.keys() + copy.values():
2405 for f in modified + added + removed + copy.keys() + copy.values():
2409 if f is not None and not f.startswith(relroot):
2406 if f is not None and not f.startswith(relroot):
2410 raise AssertionError(
2407 raise AssertionError(
2411 "file %s doesn't start with relroot %s" % (f, relroot))
2408 "file %s doesn't start with relroot %s" % (f, relroot))
2412
2409
2413 for f1, f2, copyop in _filepairs(
2410 for f1, f2, copyop in _filepairs(
2414 ctx1, modified, added, removed, copy, opts):
2411 ctx1, modified, added, removed, copy, opts):
2415 content1 = None
2412 content1 = None
2416 content2 = None
2413 content2 = None
2417 flag1 = None
2414 flag1 = None
2418 flag2 = None
2415 flag2 = None
2419 if f1:
2416 if f1:
2420 content1 = getfilectx(f1, ctx1).data()
2417 content1 = getfilectx(f1, ctx1).data()
2421 if opts.git or losedatafn:
2418 if opts.git or losedatafn:
2422 flag1 = ctx1.flags(f1)
2419 flag1 = ctx1.flags(f1)
2423 if f2:
2420 if f2:
2424 content2 = getfilectx(f2, ctx2).data()
2421 content2 = getfilectx(f2, ctx2).data()
2425 if opts.git or losedatafn:
2422 if opts.git or losedatafn:
2426 flag2 = ctx2.flags(f2)
2423 flag2 = ctx2.flags(f2)
2427 binary = False
2424 binary = False
2428 if opts.git or losedatafn:
2425 if opts.git or losedatafn:
2429 binary = util.binary(content1) or util.binary(content2)
2426 binary = util.binary(content1) or util.binary(content2)
2430
2427
2431 if losedatafn and not opts.git:
2428 if losedatafn and not opts.git:
2432 if (binary or
2429 if (binary or
2433 # copy/rename
2430 # copy/rename
2434 f2 in copy or
2431 f2 in copy or
2435 # empty file creation
2432 # empty file creation
2436 (not f1 and not content2) or
2433 (not f1 and not content2) or
2437 # empty file deletion
2434 # empty file deletion
2438 (not content1 and not f2) or
2435 (not content1 and not f2) or
2439 # create with flags
2436 # create with flags
2440 (not f1 and flag2) or
2437 (not f1 and flag2) or
2441 # change flags
2438 # change flags
2442 (f1 and f2 and flag1 != flag2)):
2439 (f1 and f2 and flag1 != flag2)):
2443 losedatafn(f2 or f1)
2440 losedatafn(f2 or f1)
2444
2441
2445 path1 = f1 or f2
2442 path1 = f1 or f2
2446 path2 = f2 or f1
2443 path2 = f2 or f1
2447 path1 = posixpath.join(prefix, path1[len(relroot):])
2444 path1 = posixpath.join(prefix, path1[len(relroot):])
2448 path2 = posixpath.join(prefix, path2[len(relroot):])
2445 path2 = posixpath.join(prefix, path2[len(relroot):])
2449 header = []
2446 header = []
2450 if opts.git:
2447 if opts.git:
2451 header.append('diff --git %s%s %s%s' %
2448 header.append('diff --git %s%s %s%s' %
2452 (aprefix, path1, bprefix, path2))
2449 (aprefix, path1, bprefix, path2))
2453 if not f1: # added
2450 if not f1: # added
2454 header.append('new file mode %s' % gitmode[flag2])
2451 header.append('new file mode %s' % gitmode[flag2])
2455 elif not f2: # removed
2452 elif not f2: # removed
2456 header.append('deleted file mode %s' % gitmode[flag1])
2453 header.append('deleted file mode %s' % gitmode[flag1])
2457 else: # modified/copied/renamed
2454 else: # modified/copied/renamed
2458 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2455 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2459 if mode1 != mode2:
2456 if mode1 != mode2:
2460 header.append('old mode %s' % mode1)
2457 header.append('old mode %s' % mode1)
2461 header.append('new mode %s' % mode2)
2458 header.append('new mode %s' % mode2)
2462 if copyop is not None:
2459 if copyop is not None:
2463 header.append('%s from %s' % (copyop, path1))
2460 header.append('%s from %s' % (copyop, path1))
2464 header.append('%s to %s' % (copyop, path2))
2461 header.append('%s to %s' % (copyop, path2))
2465 elif revs and not repo.ui.quiet:
2462 elif revs and not repo.ui.quiet:
2466 header.append(diffline(path1, revs))
2463 header.append(diffline(path1, revs))
2467
2464
2468 if binary and opts.git and not opts.nobinary:
2465 if binary and opts.git and not opts.nobinary:
2469 text = mdiff.b85diff(content1, content2)
2466 text = mdiff.b85diff(content1, content2)
2470 if text:
2467 if text:
2471 header.append('index %s..%s' %
2468 header.append('index %s..%s' %
2472 (gitindex(content1), gitindex(content2)))
2469 (gitindex(content1), gitindex(content2)))
2473 else:
2470 else:
2474 text = mdiff.unidiff(content1, date1,
2471 text = mdiff.unidiff(content1, date1,
2475 content2, date2,
2472 content2, date2,
2476 path1, path2, opts=opts)
2473 path1, path2, opts=opts)
2477 if header and (text or len(header) > 1):
2474 if header and (text or len(header) > 1):
2478 yield '\n'.join(header) + '\n'
2475 yield '\n'.join(header) + '\n'
2479 if text:
2476 if text:
2480 yield text
2477 yield text
2481
2478
2482 def diffstatsum(stats):
2479 def diffstatsum(stats):
2483 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2480 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2484 for f, a, r, b in stats:
2481 for f, a, r, b in stats:
2485 maxfile = max(maxfile, encoding.colwidth(f))
2482 maxfile = max(maxfile, encoding.colwidth(f))
2486 maxtotal = max(maxtotal, a + r)
2483 maxtotal = max(maxtotal, a + r)
2487 addtotal += a
2484 addtotal += a
2488 removetotal += r
2485 removetotal += r
2489 binary = binary or b
2486 binary = binary or b
2490
2487
2491 return maxfile, maxtotal, addtotal, removetotal, binary
2488 return maxfile, maxtotal, addtotal, removetotal, binary
2492
2489
2493 def diffstatdata(lines):
2490 def diffstatdata(lines):
2494 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2491 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2495
2492
2496 results = []
2493 results = []
2497 filename, adds, removes, isbinary = None, 0, 0, False
2494 filename, adds, removes, isbinary = None, 0, 0, False
2498
2495
2499 def addresult():
2496 def addresult():
2500 if filename:
2497 if filename:
2501 results.append((filename, adds, removes, isbinary))
2498 results.append((filename, adds, removes, isbinary))
2502
2499
2503 for line in lines:
2500 for line in lines:
2504 if line.startswith('diff'):
2501 if line.startswith('diff'):
2505 addresult()
2502 addresult()
2506 # set numbers to 0 anyway when starting new file
2503 # set numbers to 0 anyway when starting new file
2507 adds, removes, isbinary = 0, 0, False
2504 adds, removes, isbinary = 0, 0, False
2508 if line.startswith('diff --git a/'):
2505 if line.startswith('diff --git a/'):
2509 filename = gitre.search(line).group(2)
2506 filename = gitre.search(line).group(2)
2510 elif line.startswith('diff -r'):
2507 elif line.startswith('diff -r'):
2511 # format: "diff -r ... -r ... filename"
2508 # format: "diff -r ... -r ... filename"
2512 filename = diffre.search(line).group(1)
2509 filename = diffre.search(line).group(1)
2513 elif line.startswith('+') and not line.startswith('+++ '):
2510 elif line.startswith('+') and not line.startswith('+++ '):
2514 adds += 1
2511 adds += 1
2515 elif line.startswith('-') and not line.startswith('--- '):
2512 elif line.startswith('-') and not line.startswith('--- '):
2516 removes += 1
2513 removes += 1
2517 elif (line.startswith('GIT binary patch') or
2514 elif (line.startswith('GIT binary patch') or
2518 line.startswith('Binary file')):
2515 line.startswith('Binary file')):
2519 isbinary = True
2516 isbinary = True
2520 addresult()
2517 addresult()
2521 return results
2518 return results
2522
2519
2523 def diffstat(lines, width=80, git=False):
2520 def diffstat(lines, width=80, git=False):
2524 output = []
2521 output = []
2525 stats = diffstatdata(lines)
2522 stats = diffstatdata(lines)
2526 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2523 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2527
2524
2528 countwidth = len(str(maxtotal))
2525 countwidth = len(str(maxtotal))
2529 if hasbinary and countwidth < 3:
2526 if hasbinary and countwidth < 3:
2530 countwidth = 3
2527 countwidth = 3
2531 graphwidth = width - countwidth - maxname - 6
2528 graphwidth = width - countwidth - maxname - 6
2532 if graphwidth < 10:
2529 if graphwidth < 10:
2533 graphwidth = 10
2530 graphwidth = 10
2534
2531
2535 def scale(i):
2532 def scale(i):
2536 if maxtotal <= graphwidth:
2533 if maxtotal <= graphwidth:
2537 return i
2534 return i
2538 # If diffstat runs out of room it doesn't print anything,
2535 # If diffstat runs out of room it doesn't print anything,
2539 # which isn't very useful, so always print at least one + or -
2536 # which isn't very useful, so always print at least one + or -
2540 # if there were at least some changes.
2537 # if there were at least some changes.
2541 return max(i * graphwidth // maxtotal, int(bool(i)))
2538 return max(i * graphwidth // maxtotal, int(bool(i)))
2542
2539
2543 for filename, adds, removes, isbinary in stats:
2540 for filename, adds, removes, isbinary in stats:
2544 if isbinary:
2541 if isbinary:
2545 count = 'Bin'
2542 count = 'Bin'
2546 else:
2543 else:
2547 count = adds + removes
2544 count = adds + removes
2548 pluses = '+' * scale(adds)
2545 pluses = '+' * scale(adds)
2549 minuses = '-' * scale(removes)
2546 minuses = '-' * scale(removes)
2550 output.append(' %s%s | %*s %s%s\n' %
2547 output.append(' %s%s | %*s %s%s\n' %
2551 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2548 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2552 countwidth, count, pluses, minuses))
2549 countwidth, count, pluses, minuses))
2553
2550
2554 if stats:
2551 if stats:
2555 output.append(_(' %d files changed, %d insertions(+), '
2552 output.append(_(' %d files changed, %d insertions(+), '
2556 '%d deletions(-)\n')
2553 '%d deletions(-)\n')
2557 % (len(stats), totaladds, totalremoves))
2554 % (len(stats), totaladds, totalremoves))
2558
2555
2559 return ''.join(output)
2556 return ''.join(output)
2560
2557
2561 def diffstatui(*args, **kw):
2558 def diffstatui(*args, **kw):
2562 '''like diffstat(), but yields 2-tuples of (output, label) for
2559 '''like diffstat(), but yields 2-tuples of (output, label) for
2563 ui.write()
2560 ui.write()
2564 '''
2561 '''
2565
2562
2566 for line in diffstat(*args, **kw).splitlines():
2563 for line in diffstat(*args, **kw).splitlines():
2567 if line and line[-1] in '+-':
2564 if line and line[-1] in '+-':
2568 name, graph = line.rsplit(' ', 1)
2565 name, graph = line.rsplit(' ', 1)
2569 yield (name + ' ', '')
2566 yield (name + ' ', '')
2570 m = re.search(r'\++', graph)
2567 m = re.search(r'\++', graph)
2571 if m:
2568 if m:
2572 yield (m.group(0), 'diffstat.inserted')
2569 yield (m.group(0), 'diffstat.inserted')
2573 m = re.search(r'-+', graph)
2570 m = re.search(r'-+', graph)
2574 if m:
2571 if m:
2575 yield (m.group(0), 'diffstat.deleted')
2572 yield (m.group(0), 'diffstat.deleted')
2576 else:
2573 else:
2577 yield (line, '')
2574 yield (line, '')
2578 yield ('\n', '')
2575 yield ('\n', '')
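Editorial aside, not part of patch.py: diffstatdata() and diffstat() above
consume unified diff text line by line. A rough usage sketch with made-up
input:

    lines = ['diff -r 000000000000 -r 111111111111 foo.txt',
             '--- a/foo.txt',
             '+++ b/foo.txt',
             '+an added line',
             '-a removed line']
    # counts one insertion and one deletion for foo.txt, then renders a
    # per-file histogram line and a summary line
    print(diffstat(lines, width=80))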