patch: check length of git index header only if integer is specified...
Yuya Nishihara
r30819:89772662 default
@@ -1,2653 +1,2653 b''
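The hunk that implements the change named in the commit message lies beyond this excerpt (the file is 2653 lines; only roughly the first 1053 are shown below). Purely as a hedged illustration of the idea, and not the actual Mercurial hunk, a sanity check on a git 'index <old>..<new> [<mode>]' header line might validate the hash lengths only when the optional mode integer is present; the helper name below is made up:

def _gitindexcheck_sketch(line):
    # Hypothetical sketch, not Mercurial's code: an 'index' header looks like
    # 'index abc123..def456 100644', where the octal mode is optional.
    parts = line.split()
    old, new = parts[1].split('..')
    if len(parts) > 2 and parts[2].isdigit():
        # mode integer specified: sanity-check the abbreviated hash lengths
        if not (0 < len(old) <= 40 and 0 < len(new) <= 40):
            raise ValueError('invalid git index line: %s' % line)
    return old, new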
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    base85,
    copies,
    diffhelpers,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    scmutil,
    similar,
    util,
)
stringio = util.stringio

gitre = re.compile('diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(r'(\t+|[^\t]+)')

class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

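As a hedged usage sketch (not part of the original file), a caller can iterate over split() to process each patch in, say, an mbox-style file independently; the file name below is invented:

def _split_usage_sketch(path='patches.mbox'):
    # illustrative only: split() yields one file-like chunk per patch found
    with open(path) as fp:
        for i, piece in enumerate(split(fp)):
            print('patch %d begins with %r' % (i, piece.readline()))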
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                 ]

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data

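A hedged sketch (not part of the original file) of how a caller might consume extract() as documented above; every key is optional, and the caller owns the temporary file named by 'filename':

def _extract_usage_sketch(ui, fp):
    # illustrative only: ui is any Mercurial ui object, fp any readable stream
    data = extract(ui, fp)
    try:
        if 'filename' not in data:
            ui.warn('no patch found in input\n')
            return
        ui.write('patch body written to %s\n' % data['filename'])
        ui.write('commit message: %s\n' % data.get('message', ''))
    finally:
        if 'filename' in data:
            os.unlink(data['filename'])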
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

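A hedged example (not part of the original file) of what readgitpatch() returns for a minimal rename diff; the file names are invented:

def _readgitpatch_example():
    lines = ['diff --git a/old.txt b/new.txt\n',
             'rename from old.txt\n',
             'rename to new.txt\n',
             '--- a/old.txt\n']
    gp = readgitpatch(lines)[0]
    # one patchmeta object describing the rename
    assert (gp.op, gp.oldpath, gp.path) == ('RENAME', 'old.txt', 'new.txt')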
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

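A hedged sketch (not part of the original file) of linereader's push-back behaviour:

def _linereader_example():
    lr = linereader(stringio('first\nsecond\n'))
    line = lr.readline()
    lr.push(line)                 # put the line back for the next reader
    assert lr.readline() == 'first\n'
    assert lr.readline() == 'second\n'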
class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

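A hedged sketch (not part of the original file) of the backend contract described by abstractbackend, exercised through fsbackend; the ui argument is any Mercurial ui object and the paths are invented:

def _fsbackend_sketch(ui):
    root = tempfile.mkdtemp()
    try:
        backend = fsbackend(ui, root)
        backend.setfile('hello.txt', 'hi\n', (False, False), None)
        data, (islink, isexec) = backend.getfile('hello.txt')
        assert data == 'hi\n' and not islink and not isexec
        backend.unlink('hello.txt')
    finally:
        shutil.rmtree(root)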
class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

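A hedged sketch (not part of the original file) of filestore's in-memory versus spill-to-disk behaviour; the names and maxsize are chosen only for illustration:

def _filestore_sketch():
    store = filestore(maxsize=16)
    store.setfile('small.txt', 'tiny', (False, False))
    store.setfile('big.txt', 'x' * 1024, (False, False))    # spills to a temp dir
    data, mode, copied = store.getfile('big.txt')
    assert data == 'x' * 1024 and copied is None
    store.close()    # removes the temporary directory, if one was created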
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

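A hedged example (not part of the original file) of what the unified hunk regex above captures:

def _unidesc_example():
    # groups are (oldstart, oldlen, newstart, newlen); a missing length means 1
    assert unidesc.match('@@ -1,5 +1,6 @@').groups() == ('1', '5', '1', '6')
    assert unidesc.match('@@ -3 +3 @@').groups() == ('3', None, '3', None)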
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
                any(self.special_re.match(h) for h in self.header)

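A hedged example (not part of the original file) of how the header class above classifies a git diff header; the file name and index line are invented:

def _header_example():
    h = header(['diff --git a/foo.c b/foo.c\n',
                'index e69de29..d95f3ad 100644\n'])
    assert h.files() == ['foo.c']
    assert h.binary() and h.allhunks() and h.special()   # due to the 'index ' line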
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = 'record'
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1054 suffix=".diff", text=True)
1054 suffix=".diff", text=True)
1055 ncpatchfp = None
1055 ncpatchfp = None
1056 try:
1056 try:
1057 # Write the initial patch
1057 # Write the initial patch
1058 f = os.fdopen(patchfd, "w")
1058 f = os.fdopen(patchfd, "w")
1059 chunk.header.write(f)
1059 chunk.header.write(f)
1060 chunk.write(f)
1060 chunk.write(f)
1061 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1061 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1062 f.close()
1062 f.close()
1063 # Start the editor and wait for it to complete
1063 # Start the editor and wait for it to complete
1064 editor = ui.geteditor()
1064 editor = ui.geteditor()
1065 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1065 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1066 environ={'HGUSER': ui.username()})
1066 environ={'HGUSER': ui.username()})
1067 if ret != 0:
1067 if ret != 0:
1068 ui.warn(_("editor exited with exit code %d\n") % ret)
1068 ui.warn(_("editor exited with exit code %d\n") % ret)
1069 continue
1069 continue
1070 # Remove comment lines
1070 # Remove comment lines
1071 patchfp = open(patchfn)
1071 patchfp = open(patchfn)
1072 ncpatchfp = stringio()
1072 ncpatchfp = stringio()
1073 for line in util.iterfile(patchfp):
1073 for line in util.iterfile(patchfp):
1074 if not line.startswith('#'):
1074 if not line.startswith('#'):
1075 ncpatchfp.write(line)
1075 ncpatchfp.write(line)
1076 patchfp.close()
1076 patchfp.close()
1077 ncpatchfp.seek(0)
1077 ncpatchfp.seek(0)
1078 newpatches = parsepatch(ncpatchfp)
1078 newpatches = parsepatch(ncpatchfp)
1079 finally:
1079 finally:
1080 os.unlink(patchfn)
1080 os.unlink(patchfn)
1081 del ncpatchfp
1081 del ncpatchfp
1082 # Signal that the chunk shouldn't be applied as-is, but
1082 # Signal that the chunk shouldn't be applied as-is, but
1083 # provide the new patch to be used instead.
1083 # provide the new patch to be used instead.
1084 ret = False
1084 ret = False
1085 elif r == 3: # Skip
1085 elif r == 3: # Skip
1086 ret = skipfile = False
1086 ret = skipfile = False
1087 elif r == 4: # file (Record remaining)
1087 elif r == 4: # file (Record remaining)
1088 ret = skipfile = True
1088 ret = skipfile = True
1089 elif r == 5: # done, skip remaining
1089 elif r == 5: # done, skip remaining
1090 ret = skipall = False
1090 ret = skipall = False
1091 elif r == 6: # all
1091 elif r == 6: # all
1092 ret = skipall = True
1092 ret = skipall = True
1093 elif r == 7: # quit
1093 elif r == 7: # quit
1094 raise error.Abort(_('user quit'))
1094 raise error.Abort(_('user quit'))
1095 return ret, skipfile, skipall, newpatches
1095 return ret, skipfile, skipall, newpatches
1096
1096
1097 seen = set()
1097 seen = set()
1098 applied = {} # 'filename' -> [] of chunks
1098 applied = {} # 'filename' -> [] of chunks
1099 skipfile, skipall = None, None
1099 skipfile, skipall = None, None
1100 pos, total = 1, sum(len(h.hunks) for h in headers)
1100 pos, total = 1, sum(len(h.hunks) for h in headers)
1101 for h in headers:
1101 for h in headers:
1102 pos += len(h.hunks)
1102 pos += len(h.hunks)
1103 skipfile = None
1103 skipfile = None
1104 fixoffset = 0
1104 fixoffset = 0
1105 hdr = ''.join(h.header)
1105 hdr = ''.join(h.header)
1106 if hdr in seen:
1106 if hdr in seen:
1107 continue
1107 continue
1108 seen.add(hdr)
1108 seen.add(hdr)
1109 if skipall is None:
1109 if skipall is None:
1110 h.pretty(ui)
1110 h.pretty(ui)
1111 msg = (_('examine changes to %s?') %
1111 msg = (_('examine changes to %s?') %
1112 _(' and ').join("'%s'" % f for f in h.files()))
1112 _(' and ').join("'%s'" % f for f in h.files()))
1113 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1113 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1114 if not r:
1114 if not r:
1115 continue
1115 continue
1116 applied[h.filename()] = [h]
1116 applied[h.filename()] = [h]
1117 if h.allhunks():
1117 if h.allhunks():
1118 applied[h.filename()] += h.hunks
1118 applied[h.filename()] += h.hunks
1119 continue
1119 continue
1120 for i, chunk in enumerate(h.hunks):
1120 for i, chunk in enumerate(h.hunks):
1121 if skipfile is None and skipall is None:
1121 if skipfile is None and skipall is None:
1122 chunk.pretty(ui)
1122 chunk.pretty(ui)
1123 if total == 1:
1123 if total == 1:
1124 msg = messages['single'] % chunk.filename()
1124 msg = messages['single'] % chunk.filename()
1125 else:
1125 else:
1126 idx = pos - len(h.hunks) + i
1126 idx = pos - len(h.hunks) + i
1127 msg = messages['multiple'] % (idx, total, chunk.filename())
1127 msg = messages['multiple'] % (idx, total, chunk.filename())
1128 r, skipfile, skipall, newpatches = prompt(skipfile,
1128 r, skipfile, skipall, newpatches = prompt(skipfile,
1129 skipall, msg, chunk)
1129 skipall, msg, chunk)
1130 if r:
1130 if r:
1131 if fixoffset:
1131 if fixoffset:
1132 chunk = copy.copy(chunk)
1132 chunk = copy.copy(chunk)
1133 chunk.toline += fixoffset
1133 chunk.toline += fixoffset
1134 applied[chunk.filename()].append(chunk)
1134 applied[chunk.filename()].append(chunk)
1135 elif newpatches is not None:
1135 elif newpatches is not None:
1136 for newpatch in newpatches:
1136 for newpatch in newpatches:
1137 for newhunk in newpatch.hunks:
1137 for newhunk in newpatch.hunks:
1138 if fixoffset:
1138 if fixoffset:
1139 newhunk.toline += fixoffset
1139 newhunk.toline += fixoffset
1140 applied[newhunk.filename()].append(newhunk)
1140 applied[newhunk.filename()].append(newhunk)
1141 else:
1141 else:
1142 fixoffset += chunk.removed - chunk.added
1142 fixoffset += chunk.removed - chunk.added
1143 return (sum([h for h in applied.itervalues()
1143 return (sum([h for h in applied.itervalues()
1144 if h[0].special() or len(h) > 1], []), {})
1144 if h[0].special() or len(h) > 1], []), {})
1145 class hunk(object):
1145 class hunk(object):
1146 def __init__(self, desc, num, lr, context):
1146 def __init__(self, desc, num, lr, context):
1147 self.number = num
1147 self.number = num
1148 self.desc = desc
1148 self.desc = desc
1149 self.hunk = [desc]
1149 self.hunk = [desc]
1150 self.a = []
1150 self.a = []
1151 self.b = []
1151 self.b = []
1152 self.starta = self.lena = None
1152 self.starta = self.lena = None
1153 self.startb = self.lenb = None
1153 self.startb = self.lenb = None
1154 if lr is not None:
1154 if lr is not None:
1155 if context:
1155 if context:
1156 self.read_context_hunk(lr)
1156 self.read_context_hunk(lr)
1157 else:
1157 else:
1158 self.read_unified_hunk(lr)
1158 self.read_unified_hunk(lr)
1159
1159
1160 def getnormalized(self):
1160 def getnormalized(self):
1161 """Return a copy with line endings normalized to LF."""
1161 """Return a copy with line endings normalized to LF."""
1162
1162
1163 def normalize(lines):
1163 def normalize(lines):
1164 nlines = []
1164 nlines = []
1165 for line in lines:
1165 for line in lines:
1166 if line.endswith('\r\n'):
1166 if line.endswith('\r\n'):
1167 line = line[:-2] + '\n'
1167 line = line[:-2] + '\n'
1168 nlines.append(line)
1168 nlines.append(line)
1169 return nlines
1169 return nlines
1170
1170
1171 # Dummy object; it is rebuilt manually
1171 # Dummy object; it is rebuilt manually
1172 nh = hunk(self.desc, self.number, None, None)
1172 nh = hunk(self.desc, self.number, None, None)
1173 nh.number = self.number
1173 nh.number = self.number
1174 nh.desc = self.desc
1174 nh.desc = self.desc
1175 nh.hunk = self.hunk
1175 nh.hunk = self.hunk
1176 nh.a = normalize(self.a)
1176 nh.a = normalize(self.a)
1177 nh.b = normalize(self.b)
1177 nh.b = normalize(self.b)
1178 nh.starta = self.starta
1178 nh.starta = self.starta
1179 nh.startb = self.startb
1179 nh.startb = self.startb
1180 nh.lena = self.lena
1180 nh.lena = self.lena
1181 nh.lenb = self.lenb
1181 nh.lenb = self.lenb
1182 return nh
1182 return nh
1183
1183
1184 def read_unified_hunk(self, lr):
1184 def read_unified_hunk(self, lr):
1185 m = unidesc.match(self.desc)
1185 m = unidesc.match(self.desc)
1186 if not m:
1186 if not m:
1187 raise PatchError(_("bad hunk #%d") % self.number)
1187 raise PatchError(_("bad hunk #%d") % self.number)
1188 self.starta, self.lena, self.startb, self.lenb = m.groups()
1188 self.starta, self.lena, self.startb, self.lenb = m.groups()
1189 if self.lena is None:
1189 if self.lena is None:
1190 self.lena = 1
1190 self.lena = 1
1191 else:
1191 else:
1192 self.lena = int(self.lena)
1192 self.lena = int(self.lena)
1193 if self.lenb is None:
1193 if self.lenb is None:
1194 self.lenb = 1
1194 self.lenb = 1
1195 else:
1195 else:
1196 self.lenb = int(self.lenb)
1196 self.lenb = int(self.lenb)
1197 self.starta = int(self.starta)
1197 self.starta = int(self.starta)
1198 self.startb = int(self.startb)
1198 self.startb = int(self.startb)
1199 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1199 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1200 self.b)
1200 self.b)
1201 # if we hit EOF before finishing out the hunk, the last line will
1201 # if we hit EOF before finishing out the hunk, the last line will
1202 # be zero length. Let's try to fix it up.
1202 # be zero length. Let's try to fix it up.
1203 while len(self.hunk[-1]) == 0:
1203 while len(self.hunk[-1]) == 0:
1204 del self.hunk[-1]
1204 del self.hunk[-1]
1205 del self.a[-1]
1205 del self.a[-1]
1206 del self.b[-1]
1206 del self.b[-1]
1207 self.lena -= 1
1207 self.lena -= 1
1208 self.lenb -= 1
1208 self.lenb -= 1
1209 self._fixnewline(lr)
1209 self._fixnewline(lr)
1210
1210
1211 def read_context_hunk(self, lr):
1211 def read_context_hunk(self, lr):
1212 self.desc = lr.readline()
1212 self.desc = lr.readline()
1213 m = contextdesc.match(self.desc)
1213 m = contextdesc.match(self.desc)
1214 if not m:
1214 if not m:
1215 raise PatchError(_("bad hunk #%d") % self.number)
1215 raise PatchError(_("bad hunk #%d") % self.number)
1216 self.starta, aend = m.groups()
1216 self.starta, aend = m.groups()
1217 self.starta = int(self.starta)
1217 self.starta = int(self.starta)
1218 if aend is None:
1218 if aend is None:
1219 aend = self.starta
1219 aend = self.starta
1220 self.lena = int(aend) - self.starta
1220 self.lena = int(aend) - self.starta
1221 if self.starta:
1221 if self.starta:
1222 self.lena += 1
1222 self.lena += 1
1223 for x in xrange(self.lena):
1223 for x in xrange(self.lena):
1224 l = lr.readline()
1224 l = lr.readline()
1225 if l.startswith('---'):
1225 if l.startswith('---'):
1226 # lines addition, old block is empty
1226 # lines addition, old block is empty
1227 lr.push(l)
1227 lr.push(l)
1228 break
1228 break
1229 s = l[2:]
1229 s = l[2:]
1230 if l.startswith('- ') or l.startswith('! '):
1230 if l.startswith('- ') or l.startswith('! '):
1231 u = '-' + s
1231 u = '-' + s
1232 elif l.startswith(' '):
1232 elif l.startswith(' '):
1233 u = ' ' + s
1233 u = ' ' + s
1234 else:
1234 else:
1235 raise PatchError(_("bad hunk #%d old text line %d") %
1235 raise PatchError(_("bad hunk #%d old text line %d") %
1236 (self.number, x))
1236 (self.number, x))
1237 self.a.append(u)
1237 self.a.append(u)
1238 self.hunk.append(u)
1238 self.hunk.append(u)
1239
1239
1240 l = lr.readline()
1240 l = lr.readline()
1241 if l.startswith('\ '):
1241 if l.startswith('\ '):
1242 s = self.a[-1][:-1]
1242 s = self.a[-1][:-1]
1243 self.a[-1] = s
1243 self.a[-1] = s
1244 self.hunk[-1] = s
1244 self.hunk[-1] = s
1245 l = lr.readline()
1245 l = lr.readline()
1246 m = contextdesc.match(l)
1246 m = contextdesc.match(l)
1247 if not m:
1247 if not m:
1248 raise PatchError(_("bad hunk #%d") % self.number)
1248 raise PatchError(_("bad hunk #%d") % self.number)
1249 self.startb, bend = m.groups()
1249 self.startb, bend = m.groups()
1250 self.startb = int(self.startb)
1250 self.startb = int(self.startb)
1251 if bend is None:
1251 if bend is None:
1252 bend = self.startb
1252 bend = self.startb
1253 self.lenb = int(bend) - self.startb
1253 self.lenb = int(bend) - self.startb
1254 if self.startb:
1254 if self.startb:
1255 self.lenb += 1
1255 self.lenb += 1
1256 hunki = 1
1256 hunki = 1
1257 for x in xrange(self.lenb):
1257 for x in xrange(self.lenb):
1258 l = lr.readline()
1258 l = lr.readline()
1259 if l.startswith('\ '):
1259 if l.startswith('\ '):
1260 # XXX: the only way to hit this is with an invalid line range.
1260 # XXX: the only way to hit this is with an invalid line range.
1261 # The no-eol marker is not counted in the line range, but I
1261 # The no-eol marker is not counted in the line range, but I
1262 # guess some diff(1) implementations out there behave differently.
1262 # guess some diff(1) implementations out there behave differently.
1263 s = self.b[-1][:-1]
1263 s = self.b[-1][:-1]
1264 self.b[-1] = s
1264 self.b[-1] = s
1265 self.hunk[hunki - 1] = s
1265 self.hunk[hunki - 1] = s
1266 continue
1266 continue
1267 if not l:
1267 if not l:
1268 # line deletions, new block is empty and we hit EOF
1268 # line deletions, new block is empty and we hit EOF
1269 lr.push(l)
1269 lr.push(l)
1270 break
1270 break
1271 s = l[2:]
1271 s = l[2:]
1272 if l.startswith('+ ') or l.startswith('! '):
1272 if l.startswith('+ ') or l.startswith('! '):
1273 u = '+' + s
1273 u = '+' + s
1274 elif l.startswith(' '):
1274 elif l.startswith(' '):
1275 u = ' ' + s
1275 u = ' ' + s
1276 elif len(self.b) == 0:
1276 elif len(self.b) == 0:
1277 # line deletions, new block is empty
1277 # line deletions, new block is empty
1278 lr.push(l)
1278 lr.push(l)
1279 break
1279 break
1280 else:
1280 else:
1281 raise PatchError(_("bad hunk #%d old text line %d") %
1281 raise PatchError(_("bad hunk #%d old text line %d") %
1282 (self.number, x))
1282 (self.number, x))
1283 self.b.append(s)
1283 self.b.append(s)
1284 while True:
1284 while True:
1285 if hunki >= len(self.hunk):
1285 if hunki >= len(self.hunk):
1286 h = ""
1286 h = ""
1287 else:
1287 else:
1288 h = self.hunk[hunki]
1288 h = self.hunk[hunki]
1289 hunki += 1
1289 hunki += 1
1290 if h == u:
1290 if h == u:
1291 break
1291 break
1292 elif h.startswith('-'):
1292 elif h.startswith('-'):
1293 continue
1293 continue
1294 else:
1294 else:
1295 self.hunk.insert(hunki - 1, u)
1295 self.hunk.insert(hunki - 1, u)
1296 break
1296 break
1297
1297
1298 if not self.a:
1298 if not self.a:
1299 # this happens when lines were only added to the hunk
1299 # this happens when lines were only added to the hunk
1300 for x in self.hunk:
1300 for x in self.hunk:
1301 if x.startswith('-') or x.startswith(' '):
1301 if x.startswith('-') or x.startswith(' '):
1302 self.a.append(x)
1302 self.a.append(x)
1303 if not self.b:
1303 if not self.b:
1304 # this happens when lines were only deleted from the hunk
1304 # this happens when lines were only deleted from the hunk
1305 for x in self.hunk:
1305 for x in self.hunk:
1306 if x.startswith('+') or x.startswith(' '):
1306 if x.startswith('+') or x.startswith(' '):
1307 self.b.append(x[1:])
1307 self.b.append(x[1:])
1308 # @@ -start,len +start,len @@
1308 # @@ -start,len +start,len @@
1309 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1309 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1310 self.startb, self.lenb)
1310 self.startb, self.lenb)
1311 self.hunk[0] = self.desc
1311 self.hunk[0] = self.desc
1312 self._fixnewline(lr)
1312 self._fixnewline(lr)
1313
1313
1314 def _fixnewline(self, lr):
1314 def _fixnewline(self, lr):
1315 l = lr.readline()
1315 l = lr.readline()
1316 if l.startswith('\ '):
1316 if l.startswith('\ '):
1317 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1317 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1318 else:
1318 else:
1319 lr.push(l)
1319 lr.push(l)
1320
1320
1321 def complete(self):
1321 def complete(self):
1322 return len(self.a) == self.lena and len(self.b) == self.lenb
1322 return len(self.a) == self.lena and len(self.b) == self.lenb
1323
1323
1324 def _fuzzit(self, old, new, fuzz, toponly):
1324 def _fuzzit(self, old, new, fuzz, toponly):
1325 # this removes context lines from the top and bottom of the 'old' and
1325 # this removes context lines from the top and bottom of the 'old' and
1326 # 'new' lists. It checks the hunk to make sure only context lines are
1326 # 'new' lists. It checks the hunk to make sure only context lines are
1327 # removed, and then returns the shortened lists of lines.
1327 # removed, and then returns the shortened lists of lines.
1328 fuzz = min(fuzz, len(old))
1328 fuzz = min(fuzz, len(old))
1329 if fuzz:
1329 if fuzz:
1330 top = 0
1330 top = 0
1331 bot = 0
1331 bot = 0
1332 hlen = len(self.hunk)
1332 hlen = len(self.hunk)
1333 for x in xrange(hlen - 1):
1333 for x in xrange(hlen - 1):
1334 # the hunk starts with the @@ line, so use x+1
1334 # the hunk starts with the @@ line, so use x+1
1335 if self.hunk[x + 1][0] == ' ':
1335 if self.hunk[x + 1][0] == ' ':
1336 top += 1
1336 top += 1
1337 else:
1337 else:
1338 break
1338 break
1339 if not toponly:
1339 if not toponly:
1340 for x in xrange(hlen - 1):
1340 for x in xrange(hlen - 1):
1341 if self.hunk[hlen - bot - 1][0] == ' ':
1341 if self.hunk[hlen - bot - 1][0] == ' ':
1342 bot += 1
1342 bot += 1
1343 else:
1343 else:
1344 break
1344 break
1345
1345
1346 bot = min(fuzz, bot)
1346 bot = min(fuzz, bot)
1347 top = min(fuzz, top)
1347 top = min(fuzz, top)
1348 return old[top:len(old) - bot], new[top:len(new) - bot], top
1348 return old[top:len(old) - bot], new[top:len(new) - bot], top
1349 return old, new, 0
1349 return old, new, 0
1350
1350
1351 def fuzzit(self, fuzz, toponly):
1351 def fuzzit(self, fuzz, toponly):
1352 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1352 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1353 oldstart = self.starta + top
1353 oldstart = self.starta + top
1354 newstart = self.startb + top
1354 newstart = self.startb + top
1355 # zero length hunk ranges already have their start decremented
1355 # zero length hunk ranges already have their start decremented
1356 if self.lena and oldstart > 0:
1356 if self.lena and oldstart > 0:
1357 oldstart -= 1
1357 oldstart -= 1
1358 if self.lenb and newstart > 0:
1358 if self.lenb and newstart > 0:
1359 newstart -= 1
1359 newstart -= 1
1360 return old, oldstart, new, newstart
1360 return old, oldstart, new, newstart
1361
1361
1362 class binhunk(object):
1362 class binhunk(object):
1363 'A binary patch file.'
1363 'A binary patch file.'
1364 def __init__(self, lr, fname):
1364 def __init__(self, lr, fname):
1365 self.text = None
1365 self.text = None
1366 self.delta = False
1366 self.delta = False
1367 self.hunk = ['GIT binary patch\n']
1367 self.hunk = ['GIT binary patch\n']
1368 self._fname = fname
1368 self._fname = fname
1369 self._read(lr)
1369 self._read(lr)
1370
1370
1371 def complete(self):
1371 def complete(self):
1372 return self.text is not None
1372 return self.text is not None
1373
1373
1374 def new(self, lines):
1374 def new(self, lines):
1375 if self.delta:
1375 if self.delta:
1376 return [applybindelta(self.text, ''.join(lines))]
1376 return [applybindelta(self.text, ''.join(lines))]
1377 return [self.text]
1377 return [self.text]
1378
1378
1379 def _read(self, lr):
1379 def _read(self, lr):
1380 def getline(lr, hunk):
1380 def getline(lr, hunk):
1381 l = lr.readline()
1381 l = lr.readline()
1382 hunk.append(l)
1382 hunk.append(l)
1383 return l.rstrip('\r\n')
1383 return l.rstrip('\r\n')
1384
1384
1385 size = 0
1385 size = 0
1386 while True:
1386 while True:
1387 line = getline(lr, self.hunk)
1387 line = getline(lr, self.hunk)
1388 if not line:
1388 if not line:
1389 raise PatchError(_('could not extract "%s" binary data')
1389 raise PatchError(_('could not extract "%s" binary data')
1390 % self._fname)
1390 % self._fname)
1391 if line.startswith('literal '):
1391 if line.startswith('literal '):
1392 size = int(line[8:].rstrip())
1392 size = int(line[8:].rstrip())
1393 break
1393 break
1394 if line.startswith('delta '):
1394 if line.startswith('delta '):
1395 size = int(line[6:].rstrip())
1395 size = int(line[6:].rstrip())
1396 self.delta = True
1396 self.delta = True
1397 break
1397 break
1398 dec = []
1398 dec = []
1399 line = getline(lr, self.hunk)
1399 line = getline(lr, self.hunk)
1400 while len(line) > 1:
1400 while len(line) > 1:
1401 l = line[0]
1401 l = line[0]
1402 if l <= 'Z' and l >= 'A':
1402 if l <= 'Z' and l >= 'A':
1403 l = ord(l) - ord('A') + 1
1403 l = ord(l) - ord('A') + 1
1404 else:
1404 else:
1405 l = ord(l) - ord('a') + 27
1405 l = ord(l) - ord('a') + 27
1406 try:
1406 try:
1407 dec.append(base85.b85decode(line[1:])[:l])
1407 dec.append(base85.b85decode(line[1:])[:l])
1408 except ValueError as e:
1408 except ValueError as e:
1409 raise PatchError(_('could not decode "%s" binary patch: %s')
1409 raise PatchError(_('could not decode "%s" binary patch: %s')
1410 % (self._fname, str(e)))
1410 % (self._fname, str(e)))
1411 line = getline(lr, self.hunk)
1411 line = getline(lr, self.hunk)
1412 text = zlib.decompress(''.join(dec))
1412 text = zlib.decompress(''.join(dec))
1413 if len(text) != size:
1413 if len(text) != size:
1414 raise PatchError(_('"%s" length is %d bytes, should be %d')
1414 raise PatchError(_('"%s" length is %d bytes, should be %d')
1415 % (self._fname, len(text), size))
1415 % (self._fname, len(text), size))
1416 self.text = text
1416 self.text = text
1417
1417
1418 def parsefilename(str):
1418 def parsefilename(str):
1419 # --- filename \t|space stuff
1419 # --- filename \t|space stuff
1420 s = str[4:].rstrip('\r\n')
1420 s = str[4:].rstrip('\r\n')
1421 i = s.find('\t')
1421 i = s.find('\t')
1422 if i < 0:
1422 if i < 0:
1423 i = s.find(' ')
1423 i = s.find(' ')
1424 if i < 0:
1424 if i < 0:
1425 return s
1425 return s
1426 return s[:i]
1426 return s[:i]
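# For illustration (hypothetical inputs): parsefilename('--- a/foo.c\t2017-01-01')
# returns 'a/foo.c' (the name is cut at the first tab or space), and
# parsefilename('+++ b/foo.c\n') returns 'b/foo.c' once the prefix and
# trailing newline are stripped.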
1427
1427
1428 def reversehunks(hunks):
1428 def reversehunks(hunks):
1429 '''reverse the signs in the hunks given as argument
1429 '''reverse the signs in the hunks given as argument
1430
1430
1431 This function operates on hunks coming out of patch.filterpatch, that is
1431 This function operates on hunks coming out of patch.filterpatch, that is
1432 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1432 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1433
1433
1434 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1434 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1435 ... --- a/folder1/g
1435 ... --- a/folder1/g
1436 ... +++ b/folder1/g
1436 ... +++ b/folder1/g
1437 ... @@ -1,7 +1,7 @@
1437 ... @@ -1,7 +1,7 @@
1438 ... +firstline
1438 ... +firstline
1439 ... c
1439 ... c
1440 ... 1
1440 ... 1
1441 ... 2
1441 ... 2
1442 ... + 3
1442 ... + 3
1443 ... -4
1443 ... -4
1444 ... 5
1444 ... 5
1445 ... d
1445 ... d
1446 ... +lastline"""
1446 ... +lastline"""
1447 >>> hunks = parsepatch(rawpatch)
1447 >>> hunks = parsepatch(rawpatch)
1448 >>> hunkscomingfromfilterpatch = []
1448 >>> hunkscomingfromfilterpatch = []
1449 >>> for h in hunks:
1449 >>> for h in hunks:
1450 ... hunkscomingfromfilterpatch.append(h)
1450 ... hunkscomingfromfilterpatch.append(h)
1451 ... hunkscomingfromfilterpatch.extend(h.hunks)
1451 ... hunkscomingfromfilterpatch.extend(h.hunks)
1452
1452
1453 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1453 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1454 >>> from . import util
1454 >>> from . import util
1455 >>> fp = util.stringio()
1455 >>> fp = util.stringio()
1456 >>> for c in reversedhunks:
1456 >>> for c in reversedhunks:
1457 ... c.write(fp)
1457 ... c.write(fp)
1458 >>> fp.seek(0)
1458 >>> fp.seek(0)
1459 >>> reversedpatch = fp.read()
1459 >>> reversedpatch = fp.read()
1460 >>> print reversedpatch
1460 >>> print reversedpatch
1461 diff --git a/folder1/g b/folder1/g
1461 diff --git a/folder1/g b/folder1/g
1462 --- a/folder1/g
1462 --- a/folder1/g
1463 +++ b/folder1/g
1463 +++ b/folder1/g
1464 @@ -1,4 +1,3 @@
1464 @@ -1,4 +1,3 @@
1465 -firstline
1465 -firstline
1466 c
1466 c
1467 1
1467 1
1468 2
1468 2
1469 @@ -1,6 +2,6 @@
1469 @@ -1,6 +2,6 @@
1470 c
1470 c
1471 1
1471 1
1472 2
1472 2
1473 - 3
1473 - 3
1474 +4
1474 +4
1475 5
1475 5
1476 d
1476 d
1477 @@ -5,3 +6,2 @@
1477 @@ -5,3 +6,2 @@
1478 5
1478 5
1479 d
1479 d
1480 -lastline
1480 -lastline
1481
1481
1482 '''
1482 '''
1483
1483
1484 from . import crecord as crecordmod
1484 from . import crecord as crecordmod
1485 newhunks = []
1485 newhunks = []
1486 for c in hunks:
1486 for c in hunks:
1487 if isinstance(c, crecordmod.uihunk):
1487 if isinstance(c, crecordmod.uihunk):
1488 # curses hunks encapsulate the record hunk in _hunk
1488 # curses hunks encapsulate the record hunk in _hunk
1489 c = c._hunk
1489 c = c._hunk
1490 if isinstance(c, recordhunk):
1490 if isinstance(c, recordhunk):
1491 for j, line in enumerate(c.hunk):
1491 for j, line in enumerate(c.hunk):
1492 if line.startswith("-"):
1492 if line.startswith("-"):
1493 c.hunk[j] = "+" + c.hunk[j][1:]
1493 c.hunk[j] = "+" + c.hunk[j][1:]
1494 elif line.startswith("+"):
1494 elif line.startswith("+"):
1495 c.hunk[j] = "-" + c.hunk[j][1:]
1495 c.hunk[j] = "-" + c.hunk[j][1:]
1496 c.added, c.removed = c.removed, c.added
1496 c.added, c.removed = c.removed, c.added
1497 newhunks.append(c)
1497 newhunks.append(c)
1498 return newhunks
1498 return newhunks
1499
1499
1500 def parsepatch(originalchunks):
1500 def parsepatch(originalchunks):
1501 """patch -> [] of headers -> [] of hunks """
1501 """patch -> [] of headers -> [] of hunks """
1502 class parser(object):
1502 class parser(object):
1503 """patch parsing state machine"""
1503 """patch parsing state machine"""
1504 def __init__(self):
1504 def __init__(self):
1505 self.fromline = 0
1505 self.fromline = 0
1506 self.toline = 0
1506 self.toline = 0
1507 self.proc = ''
1507 self.proc = ''
1508 self.header = None
1508 self.header = None
1509 self.context = []
1509 self.context = []
1510 self.before = []
1510 self.before = []
1511 self.hunk = []
1511 self.hunk = []
1512 self.headers = []
1512 self.headers = []
1513
1513
1514 def addrange(self, limits):
1514 def addrange(self, limits):
1515 fromstart, fromend, tostart, toend, proc = limits
1515 fromstart, fromend, tostart, toend, proc = limits
1516 self.fromline = int(fromstart)
1516 self.fromline = int(fromstart)
1517 self.toline = int(tostart)
1517 self.toline = int(tostart)
1518 self.proc = proc
1518 self.proc = proc
1519
1519
1520 def addcontext(self, context):
1520 def addcontext(self, context):
1521 if self.hunk:
1521 if self.hunk:
1522 h = recordhunk(self.header, self.fromline, self.toline,
1522 h = recordhunk(self.header, self.fromline, self.toline,
1523 self.proc, self.before, self.hunk, context)
1523 self.proc, self.before, self.hunk, context)
1524 self.header.hunks.append(h)
1524 self.header.hunks.append(h)
1525 self.fromline += len(self.before) + h.removed
1525 self.fromline += len(self.before) + h.removed
1526 self.toline += len(self.before) + h.added
1526 self.toline += len(self.before) + h.added
1527 self.before = []
1527 self.before = []
1528 self.hunk = []
1528 self.hunk = []
1529 self.context = context
1529 self.context = context
1530
1530
1531 def addhunk(self, hunk):
1531 def addhunk(self, hunk):
1532 if self.context:
1532 if self.context:
1533 self.before = self.context
1533 self.before = self.context
1534 self.context = []
1534 self.context = []
1535 self.hunk = hunk
1535 self.hunk = hunk
1536
1536
1537 def newfile(self, hdr):
1537 def newfile(self, hdr):
1538 self.addcontext([])
1538 self.addcontext([])
1539 h = header(hdr)
1539 h = header(hdr)
1540 self.headers.append(h)
1540 self.headers.append(h)
1541 self.header = h
1541 self.header = h
1542
1542
1543 def addother(self, line):
1543 def addother(self, line):
1544 pass # 'other' lines are ignored
1544 pass # 'other' lines are ignored
1545
1545
1546 def finished(self):
1546 def finished(self):
1547 self.addcontext([])
1547 self.addcontext([])
1548 return self.headers
1548 return self.headers
1549
1549
1550 transitions = {
1550 transitions = {
1551 'file': {'context': addcontext,
1551 'file': {'context': addcontext,
1552 'file': newfile,
1552 'file': newfile,
1553 'hunk': addhunk,
1553 'hunk': addhunk,
1554 'range': addrange},
1554 'range': addrange},
1555 'context': {'file': newfile,
1555 'context': {'file': newfile,
1556 'hunk': addhunk,
1556 'hunk': addhunk,
1557 'range': addrange,
1557 'range': addrange,
1558 'other': addother},
1558 'other': addother},
1559 'hunk': {'context': addcontext,
1559 'hunk': {'context': addcontext,
1560 'file': newfile,
1560 'file': newfile,
1561 'range': addrange},
1561 'range': addrange},
1562 'range': {'context': addcontext,
1562 'range': {'context': addcontext,
1563 'hunk': addhunk},
1563 'hunk': addhunk},
1564 'other': {'other': addother},
1564 'other': {'other': addother},
1565 }
1565 }
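# For a typical unified diff, scanpatch() yields events that walk these
# states roughly as: file -> range -> context/hunk (repeated) -> range ->
# ... -> file -> ...; any pair not listed above raises the
# "unhandled transition" error in the loop below.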
1566
1566
1567 p = parser()
1567 p = parser()
1568 fp = stringio()
1568 fp = stringio()
1569 fp.write(''.join(originalchunks))
1569 fp.write(''.join(originalchunks))
1570 fp.seek(0)
1570 fp.seek(0)
1571
1571
1572 state = 'context'
1572 state = 'context'
1573 for newstate, data in scanpatch(fp):
1573 for newstate, data in scanpatch(fp):
1574 try:
1574 try:
1575 p.transitions[state][newstate](p, data)
1575 p.transitions[state][newstate](p, data)
1576 except KeyError:
1576 except KeyError:
1577 raise PatchError('unhandled transition: %s -> %s' %
1577 raise PatchError('unhandled transition: %s -> %s' %
1578 (state, newstate))
1578 (state, newstate))
1579 state = newstate
1579 state = newstate
1580 del fp
1580 del fp
1581 return p.finished()
1581 return p.finished()
1582
1582
1583 def pathtransform(path, strip, prefix):
1583 def pathtransform(path, strip, prefix):
1584 '''turn a path from a patch into a path suitable for the repository
1584 '''turn a path from a patch into a path suitable for the repository
1585
1585
1586 prefix, if not empty, is expected to be normalized with a / at the end.
1586 prefix, if not empty, is expected to be normalized with a / at the end.
1587
1587
1588 Returns (stripped components, path in repository).
1588 Returns (stripped components, path in repository).
1589
1589
1590 >>> pathtransform('a/b/c', 0, '')
1590 >>> pathtransform('a/b/c', 0, '')
1591 ('', 'a/b/c')
1591 ('', 'a/b/c')
1592 >>> pathtransform(' a/b/c ', 0, '')
1592 >>> pathtransform(' a/b/c ', 0, '')
1593 ('', ' a/b/c')
1593 ('', ' a/b/c')
1594 >>> pathtransform(' a/b/c ', 2, '')
1594 >>> pathtransform(' a/b/c ', 2, '')
1595 ('a/b/', 'c')
1595 ('a/b/', 'c')
1596 >>> pathtransform('a/b/c', 0, 'd/e/')
1596 >>> pathtransform('a/b/c', 0, 'd/e/')
1597 ('', 'd/e/a/b/c')
1597 ('', 'd/e/a/b/c')
1598 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1598 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1599 ('a//b/', 'd/e/c')
1599 ('a//b/', 'd/e/c')
1600 >>> pathtransform('a/b/c', 3, '')
1600 >>> pathtransform('a/b/c', 3, '')
1601 Traceback (most recent call last):
1601 Traceback (most recent call last):
1602 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1602 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1603 '''
1603 '''
1604 pathlen = len(path)
1604 pathlen = len(path)
1605 i = 0
1605 i = 0
1606 if strip == 0:
1606 if strip == 0:
1607 return '', prefix + path.rstrip()
1607 return '', prefix + path.rstrip()
1608 count = strip
1608 count = strip
1609 while count > 0:
1609 while count > 0:
1610 i = path.find('/', i)
1610 i = path.find('/', i)
1611 if i == -1:
1611 if i == -1:
1612 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1612 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1613 (count, strip, path))
1613 (count, strip, path))
1614 i += 1
1614 i += 1
1615 # consume '//' in the path
1615 # consume '//' in the path
1616 while i < pathlen - 1 and path[i] == '/':
1616 while i < pathlen - 1 and path[i] == '/':
1617 i += 1
1617 i += 1
1618 count -= 1
1618 count -= 1
1619 return path[:i].lstrip(), prefix + path[i:].rstrip()
1619 return path[:i].lstrip(), prefix + path[i:].rstrip()
1620
1620
1621 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1621 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1622 nulla = afile_orig == "/dev/null"
1622 nulla = afile_orig == "/dev/null"
1623 nullb = bfile_orig == "/dev/null"
1623 nullb = bfile_orig == "/dev/null"
1624 create = nulla and hunk.starta == 0 and hunk.lena == 0
1624 create = nulla and hunk.starta == 0 and hunk.lena == 0
1625 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1625 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1626 abase, afile = pathtransform(afile_orig, strip, prefix)
1626 abase, afile = pathtransform(afile_orig, strip, prefix)
1627 gooda = not nulla and backend.exists(afile)
1627 gooda = not nulla and backend.exists(afile)
1628 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1628 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1629 if afile == bfile:
1629 if afile == bfile:
1630 goodb = gooda
1630 goodb = gooda
1631 else:
1631 else:
1632 goodb = not nullb and backend.exists(bfile)
1632 goodb = not nullb and backend.exists(bfile)
1633 missing = not goodb and not gooda and not create
1633 missing = not goodb and not gooda and not create
1634
1634
1635 # some diff programs apparently produce patches where the afile is
1635 # some diff programs apparently produce patches where the afile is
1636 # not /dev/null, but afile starts with bfile
1636 # not /dev/null, but afile starts with bfile
1637 abasedir = afile[:afile.rfind('/') + 1]
1637 abasedir = afile[:afile.rfind('/') + 1]
1638 bbasedir = bfile[:bfile.rfind('/') + 1]
1638 bbasedir = bfile[:bfile.rfind('/') + 1]
1639 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1639 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1640 and hunk.starta == 0 and hunk.lena == 0):
1640 and hunk.starta == 0 and hunk.lena == 0):
1641 create = True
1641 create = True
1642 missing = False
1642 missing = False
1643
1643
1644 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1644 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1645 # diff is between a file and its backup. In this case, the original
1645 # diff is between a file and its backup. In this case, the original
1646 # file should be patched (see original mpatch code).
1646 # file should be patched (see original mpatch code).
1647 isbackup = (abase == bbase and bfile.startswith(afile))
1647 isbackup = (abase == bbase and bfile.startswith(afile))
1648 fname = None
1648 fname = None
1649 if not missing:
1649 if not missing:
1650 if gooda and goodb:
1650 if gooda and goodb:
1651 if isbackup:
1651 if isbackup:
1652 fname = afile
1652 fname = afile
1653 else:
1653 else:
1654 fname = bfile
1654 fname = bfile
1655 elif gooda:
1655 elif gooda:
1656 fname = afile
1656 fname = afile
1657
1657
1658 if not fname:
1658 if not fname:
1659 if not nullb:
1659 if not nullb:
1660 if isbackup:
1660 if isbackup:
1661 fname = afile
1661 fname = afile
1662 else:
1662 else:
1663 fname = bfile
1663 fname = bfile
1664 elif not nulla:
1664 elif not nulla:
1665 fname = afile
1665 fname = afile
1666 else:
1666 else:
1667 raise PatchError(_("undefined source and destination files"))
1667 raise PatchError(_("undefined source and destination files"))
1668
1668
1669 gp = patchmeta(fname)
1669 gp = patchmeta(fname)
1670 if create:
1670 if create:
1671 gp.op = 'ADD'
1671 gp.op = 'ADD'
1672 elif remove:
1672 elif remove:
1673 gp.op = 'DELETE'
1673 gp.op = 'DELETE'
1674 return gp
1674 return gp
1675
1675
1676 def scanpatch(fp):
1676 def scanpatch(fp):
1677 """like patch.iterhunks, but yield different events
1677 """like patch.iterhunks, but yield different events
1678
1678
1679 - ('file', [header_lines + fromfile + tofile])
1679 - ('file', [header_lines + fromfile + tofile])
1680 - ('context', [context_lines])
1680 - ('context', [context_lines])
1681 - ('hunk', [hunk_lines])
1681 - ('hunk', [hunk_lines])
1682 - ('range', (-start,len, +start,len, proc))
1682 - ('range', (-start,len, +start,len, proc))
1683 """
1683 """
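# Illustrative consumer sketch (assuming fp is any file-like object holding
# unified diff text):
#
#   for event, data in scanpatch(fp):
#       if event == 'range':
#           fromstart, fromlen, tostart, tolen, proc = data
#       elif event == 'hunk':
#           changed = data  # the '+'/'-'/'\' lines of one hunk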
1684 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1684 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1685 lr = linereader(fp)
1685 lr = linereader(fp)
1686
1686
1687 def scanwhile(first, p):
1687 def scanwhile(first, p):
1688 """scan lr while predicate holds"""
1688 """scan lr while predicate holds"""
1689 lines = [first]
1689 lines = [first]
1690 for line in iter(lr.readline, ''):
1690 for line in iter(lr.readline, ''):
1691 if p(line):
1691 if p(line):
1692 lines.append(line)
1692 lines.append(line)
1693 else:
1693 else:
1694 lr.push(line)
1694 lr.push(line)
1695 break
1695 break
1696 return lines
1696 return lines
1697
1697
1698 for line in iter(lr.readline, ''):
1698 for line in iter(lr.readline, ''):
1699 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1699 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1700 def notheader(line):
1700 def notheader(line):
1701 s = line.split(None, 1)
1701 s = line.split(None, 1)
1702 return not s or s[0] not in ('---', 'diff')
1702 return not s or s[0] not in ('---', 'diff')
1703 header = scanwhile(line, notheader)
1703 header = scanwhile(line, notheader)
1704 fromfile = lr.readline()
1704 fromfile = lr.readline()
1705 if fromfile.startswith('---'):
1705 if fromfile.startswith('---'):
1706 tofile = lr.readline()
1706 tofile = lr.readline()
1707 header += [fromfile, tofile]
1707 header += [fromfile, tofile]
1708 else:
1708 else:
1709 lr.push(fromfile)
1709 lr.push(fromfile)
1710 yield 'file', header
1710 yield 'file', header
1711 elif line[0] == ' ':
1711 elif line[0] == ' ':
1712 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1712 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1713 elif line[0] in '-+':
1713 elif line[0] in '-+':
1714 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1714 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1715 else:
1715 else:
1716 m = lines_re.match(line)
1716 m = lines_re.match(line)
1717 if m:
1717 if m:
1718 yield 'range', m.groups()
1718 yield 'range', m.groups()
1719 else:
1719 else:
1720 yield 'other', line
1720 yield 'other', line
1721
1721
1722 def scangitpatch(lr, firstline):
1722 def scangitpatch(lr, firstline):
1723 """
1723 """
1724 Git patches can emit:
1724 Git patches can emit:
1725 - rename a to b
1725 - rename a to b
1726 - change b
1726 - change b
1727 - copy a to c
1727 - copy a to c
1728 - change c
1728 - change c
1729
1729
1730 We cannot apply this sequence as-is: the renamed 'a' could not be
1730 We cannot apply this sequence as-is: the renamed 'a' could not be
1731 found, as it would have been renamed already. And we cannot copy
1731 found, as it would have been renamed already. And we cannot copy
1732 from 'b' instead because 'b' would have been changed already. So
1732 from 'b' instead because 'b' would have been changed already. So
1733 we scan the git patch for copy and rename commands so we can
1733 we scan the git patch for copy and rename commands so we can
1734 perform the copies ahead of time.
1734 perform the copies ahead of time.
1735 """
1735 """
1736 pos = 0
1736 pos = 0
1737 try:
1737 try:
1738 pos = lr.fp.tell()
1738 pos = lr.fp.tell()
1739 fp = lr.fp
1739 fp = lr.fp
1740 except IOError:
1740 except IOError:
1741 fp = stringio(lr.fp.read())
1741 fp = stringio(lr.fp.read())
1742 gitlr = linereader(fp)
1742 gitlr = linereader(fp)
1743 gitlr.push(firstline)
1743 gitlr.push(firstline)
1744 gitpatches = readgitpatch(gitlr)
1744 gitpatches = readgitpatch(gitlr)
1745 fp.seek(pos)
1745 fp.seek(pos)
1746 return gitpatches
1746 return gitpatches
1747
1747
1748 def iterhunks(fp):
1748 def iterhunks(fp):
1749 """Read a patch and yield the following events:
1749 """Read a patch and yield the following events:
1750 - ("file", afile, bfile, firsthunk): select a new target file.
1750 - ("file", afile, bfile, firsthunk): select a new target file.
1751 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1751 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1752 "file" event.
1752 "file" event.
1753 - ("git", gitchanges): current diff is in git format, gitchanges
1753 - ("git", gitchanges): current diff is in git format, gitchanges
1754 maps filenames to gitpatch records. Unique event.
1754 maps filenames to gitpatch records. Unique event.
1755 """
1755 """
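# Illustrative consumer sketch (mirroring how _applydiff() below drives this
# generator; fp is assumed to be a file-like object with the patch text):
#
#   for state, values in iterhunks(fp):
#       if state == 'file':
#           afile, bfile, firsthunk, gp = values
#       elif state == 'hunk':
#           pass  # apply the hunk in 'values' to the selected file
#       elif state == 'git':
#           pass  # pre-copy sources of the COPY/RENAME entries in 'values'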
1756 afile = ""
1756 afile = ""
1757 bfile = ""
1757 bfile = ""
1758 state = None
1758 state = None
1759 hunknum = 0
1759 hunknum = 0
1760 emitfile = newfile = False
1760 emitfile = newfile = False
1761 gitpatches = None
1761 gitpatches = None
1762
1762
1763 # our states
1763 # our states
1764 BFILE = 1
1764 BFILE = 1
1765 context = None
1765 context = None
1766 lr = linereader(fp)
1766 lr = linereader(fp)
1767
1767
1768 for x in iter(lr.readline, ''):
1768 for x in iter(lr.readline, ''):
1769 if state == BFILE and (
1769 if state == BFILE and (
1770 (not context and x[0] == '@')
1770 (not context and x[0] == '@')
1771 or (context is not False and x.startswith('***************'))
1771 or (context is not False and x.startswith('***************'))
1772 or x.startswith('GIT binary patch')):
1772 or x.startswith('GIT binary patch')):
1773 gp = None
1773 gp = None
1774 if (gitpatches and
1774 if (gitpatches and
1775 gitpatches[-1].ispatching(afile, bfile)):
1775 gitpatches[-1].ispatching(afile, bfile)):
1776 gp = gitpatches.pop()
1776 gp = gitpatches.pop()
1777 if x.startswith('GIT binary patch'):
1777 if x.startswith('GIT binary patch'):
1778 h = binhunk(lr, gp.path)
1778 h = binhunk(lr, gp.path)
1779 else:
1779 else:
1780 if context is None and x.startswith('***************'):
1780 if context is None and x.startswith('***************'):
1781 context = True
1781 context = True
1782 h = hunk(x, hunknum + 1, lr, context)
1782 h = hunk(x, hunknum + 1, lr, context)
1783 hunknum += 1
1783 hunknum += 1
1784 if emitfile:
1784 if emitfile:
1785 emitfile = False
1785 emitfile = False
1786 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1786 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1787 yield 'hunk', h
1787 yield 'hunk', h
1788 elif x.startswith('diff --git a/'):
1788 elif x.startswith('diff --git a/'):
1789 m = gitre.match(x.rstrip(' \r\n'))
1789 m = gitre.match(x.rstrip(' \r\n'))
1790 if not m:
1790 if not m:
1791 continue
1791 continue
1792 if gitpatches is None:
1792 if gitpatches is None:
1793 # scan whole input for git metadata
1793 # scan whole input for git metadata
1794 gitpatches = scangitpatch(lr, x)
1794 gitpatches = scangitpatch(lr, x)
1795 yield 'git', [g.copy() for g in gitpatches
1795 yield 'git', [g.copy() for g in gitpatches
1796 if g.op in ('COPY', 'RENAME')]
1796 if g.op in ('COPY', 'RENAME')]
1797 gitpatches.reverse()
1797 gitpatches.reverse()
1798 afile = 'a/' + m.group(1)
1798 afile = 'a/' + m.group(1)
1799 bfile = 'b/' + m.group(2)
1799 bfile = 'b/' + m.group(2)
1800 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1800 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1801 gp = gitpatches.pop()
1801 gp = gitpatches.pop()
1802 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1802 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1803 if not gitpatches:
1803 if not gitpatches:
1804 raise PatchError(_('failed to synchronize metadata for "%s"')
1804 raise PatchError(_('failed to synchronize metadata for "%s"')
1805 % afile[2:])
1805 % afile[2:])
1806 gp = gitpatches[-1]
1806 gp = gitpatches[-1]
1807 newfile = True
1807 newfile = True
1808 elif x.startswith('---'):
1808 elif x.startswith('---'):
1809 # check for a unified diff
1809 # check for a unified diff
1810 l2 = lr.readline()
1810 l2 = lr.readline()
1811 if not l2.startswith('+++'):
1811 if not l2.startswith('+++'):
1812 lr.push(l2)
1812 lr.push(l2)
1813 continue
1813 continue
1814 newfile = True
1814 newfile = True
1815 context = False
1815 context = False
1816 afile = parsefilename(x)
1816 afile = parsefilename(x)
1817 bfile = parsefilename(l2)
1817 bfile = parsefilename(l2)
1818 elif x.startswith('***'):
1818 elif x.startswith('***'):
1819 # check for a context diff
1819 # check for a context diff
1820 l2 = lr.readline()
1820 l2 = lr.readline()
1821 if not l2.startswith('---'):
1821 if not l2.startswith('---'):
1822 lr.push(l2)
1822 lr.push(l2)
1823 continue
1823 continue
1824 l3 = lr.readline()
1824 l3 = lr.readline()
1825 lr.push(l3)
1825 lr.push(l3)
1826 if not l3.startswith("***************"):
1826 if not l3.startswith("***************"):
1827 lr.push(l2)
1827 lr.push(l2)
1828 continue
1828 continue
1829 newfile = True
1829 newfile = True
1830 context = True
1830 context = True
1831 afile = parsefilename(x)
1831 afile = parsefilename(x)
1832 bfile = parsefilename(l2)
1832 bfile = parsefilename(l2)
1833
1833
1834 if newfile:
1834 if newfile:
1835 newfile = False
1835 newfile = False
1836 emitfile = True
1836 emitfile = True
1837 state = BFILE
1837 state = BFILE
1838 hunknum = 0
1838 hunknum = 0
1839
1839
1840 while gitpatches:
1840 while gitpatches:
1841 gp = gitpatches.pop()
1841 gp = gitpatches.pop()
1842 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1842 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1843
1843
1844 def applybindelta(binchunk, data):
1844 def applybindelta(binchunk, data):
1845 """Apply a binary delta hunk
1845 """Apply a binary delta hunk
1846 The algorithm used is the algorithm from git's patch-delta.c
1846 The algorithm used is the algorithm from git's patch-delta.c
1847 """
1847 """
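# Format notes (following git's delta encoding, which the code below
# implements): the delta opens with two variable-length integers giving the
# expected source and result sizes; deltahead() skips one such integer.
# Each later command byte either has its high bit set -- a copy opcode
# whose low bits select the offset and size bytes that follow, with a size
# of 0 meaning 0x10000 -- or is a nonzero count of literal bytes to insert;
# a zero command byte is rejected as invalid.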
1848 def deltahead(binchunk):
1848 def deltahead(binchunk):
1849 i = 0
1849 i = 0
1850 for c in binchunk:
1850 for c in binchunk:
1851 i += 1
1851 i += 1
1852 if not (ord(c) & 0x80):
1852 if not (ord(c) & 0x80):
1853 return i
1853 return i
1854 return i
1854 return i
1855 out = ""
1855 out = ""
1856 s = deltahead(binchunk)
1856 s = deltahead(binchunk)
1857 binchunk = binchunk[s:]
1857 binchunk = binchunk[s:]
1858 s = deltahead(binchunk)
1858 s = deltahead(binchunk)
1859 binchunk = binchunk[s:]
1859 binchunk = binchunk[s:]
1860 i = 0
1860 i = 0
1861 while i < len(binchunk):
1861 while i < len(binchunk):
1862 cmd = ord(binchunk[i])
1862 cmd = ord(binchunk[i])
1863 i += 1
1863 i += 1
1864 if (cmd & 0x80):
1864 if (cmd & 0x80):
1865 offset = 0
1865 offset = 0
1866 size = 0
1866 size = 0
1867 if (cmd & 0x01):
1867 if (cmd & 0x01):
1868 offset = ord(binchunk[i])
1868 offset = ord(binchunk[i])
1869 i += 1
1869 i += 1
1870 if (cmd & 0x02):
1870 if (cmd & 0x02):
1871 offset |= ord(binchunk[i]) << 8
1871 offset |= ord(binchunk[i]) << 8
1872 i += 1
1872 i += 1
1873 if (cmd & 0x04):
1873 if (cmd & 0x04):
1874 offset |= ord(binchunk[i]) << 16
1874 offset |= ord(binchunk[i]) << 16
1875 i += 1
1875 i += 1
1876 if (cmd & 0x08):
1876 if (cmd & 0x08):
1877 offset |= ord(binchunk[i]) << 24
1877 offset |= ord(binchunk[i]) << 24
1878 i += 1
1878 i += 1
1879 if (cmd & 0x10):
1879 if (cmd & 0x10):
1880 size = ord(binchunk[i])
1880 size = ord(binchunk[i])
1881 i += 1
1881 i += 1
1882 if (cmd & 0x20):
1882 if (cmd & 0x20):
1883 size |= ord(binchunk[i]) << 8
1883 size |= ord(binchunk[i]) << 8
1884 i += 1
1884 i += 1
1885 if (cmd & 0x40):
1885 if (cmd & 0x40):
1886 size |= ord(binchunk[i]) << 16
1886 size |= ord(binchunk[i]) << 16
1887 i += 1
1887 i += 1
1888 if size == 0:
1888 if size == 0:
1889 size = 0x10000
1889 size = 0x10000
1890 offset_end = offset + size
1890 offset_end = offset + size
1891 out += data[offset:offset_end]
1891 out += data[offset:offset_end]
1892 elif cmd != 0:
1892 elif cmd != 0:
1893 offset_end = i + cmd
1893 offset_end = i + cmd
1894 out += binchunk[i:offset_end]
1894 out += binchunk[i:offset_end]
1895 i += cmd
1895 i += cmd
1896 else:
1896 else:
1897 raise PatchError(_('unexpected delta opcode 0'))
1897 raise PatchError(_('unexpected delta opcode 0'))
1898 return out
1898 return out
1899
1899
1900 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1900 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1901 """Reads a patch from fp and tries to apply it.
1901 """Reads a patch from fp and tries to apply it.
1902
1902
1903 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1903 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1904 there was any fuzz.
1904 there was any fuzz.
1905
1905
1906 If 'eolmode' is 'strict', the patch content and patched file are
1906 If 'eolmode' is 'strict', the patch content and patched file are
1907 read in binary mode. Otherwise, line endings are ignored when
1907 read in binary mode. Otherwise, line endings are ignored when
1908 patching then normalized according to 'eolmode'.
1908 patching then normalized according to 'eolmode'.
1909 """
1909 """
1910 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1910 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1911 prefix=prefix, eolmode=eolmode)
1911 prefix=prefix, eolmode=eolmode)
1912
1912
1913 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1913 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1914 eolmode='strict'):
1914 eolmode='strict'):
1915
1915
1916 if prefix:
1916 if prefix:
1917 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1917 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1918 prefix)
1918 prefix)
1919 if prefix != '':
1919 if prefix != '':
1920 prefix += '/'
1920 prefix += '/'
1921 def pstrip(p):
1921 def pstrip(p):
1922 return pathtransform(p, strip - 1, prefix)[1]
1922 return pathtransform(p, strip - 1, prefix)[1]
1923
1923
1924 rejects = 0
1924 rejects = 0
1925 err = 0
1925 err = 0
1926 current_file = None
1926 current_file = None
1927
1927
1928 for state, values in iterhunks(fp):
1928 for state, values in iterhunks(fp):
1929 if state == 'hunk':
1929 if state == 'hunk':
1930 if not current_file:
1930 if not current_file:
1931 continue
1931 continue
1932 ret = current_file.apply(values)
1932 ret = current_file.apply(values)
1933 if ret > 0:
1933 if ret > 0:
1934 err = 1
1934 err = 1
1935 elif state == 'file':
1935 elif state == 'file':
1936 if current_file:
1936 if current_file:
1937 rejects += current_file.close()
1937 rejects += current_file.close()
1938 current_file = None
1938 current_file = None
1939 afile, bfile, first_hunk, gp = values
1939 afile, bfile, first_hunk, gp = values
1940 if gp:
1940 if gp:
1941 gp.path = pstrip(gp.path)
1941 gp.path = pstrip(gp.path)
1942 if gp.oldpath:
1942 if gp.oldpath:
1943 gp.oldpath = pstrip(gp.oldpath)
1943 gp.oldpath = pstrip(gp.oldpath)
1944 else:
1944 else:
1945 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1945 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1946 prefix)
1946 prefix)
1947 if gp.op == 'RENAME':
1947 if gp.op == 'RENAME':
1948 backend.unlink(gp.oldpath)
1948 backend.unlink(gp.oldpath)
1949 if not first_hunk:
1949 if not first_hunk:
1950 if gp.op == 'DELETE':
1950 if gp.op == 'DELETE':
1951 backend.unlink(gp.path)
1951 backend.unlink(gp.path)
1952 continue
1952 continue
1953 data, mode = None, None
1953 data, mode = None, None
1954 if gp.op in ('RENAME', 'COPY'):
1954 if gp.op in ('RENAME', 'COPY'):
1955 data, mode = store.getfile(gp.oldpath)[:2]
1955 data, mode = store.getfile(gp.oldpath)[:2]
1956 if data is None:
1956 if data is None:
1957 # This means that the old path does not exist
1957 # This means that the old path does not exist
1958 raise PatchError(_("source file '%s' does not exist")
1958 raise PatchError(_("source file '%s' does not exist")
1959 % gp.oldpath)
1959 % gp.oldpath)
1960 if gp.mode:
1960 if gp.mode:
1961 mode = gp.mode
1961 mode = gp.mode
1962 if gp.op == 'ADD':
1962 if gp.op == 'ADD':
1963 # Added files without content have no hunk and
1963 # Added files without content have no hunk and
1964 # must be created
1964 # must be created
1965 data = ''
1965 data = ''
1966 if data or mode:
1966 if data or mode:
1967 if (gp.op in ('ADD', 'RENAME', 'COPY')
1967 if (gp.op in ('ADD', 'RENAME', 'COPY')
1968 and backend.exists(gp.path)):
1968 and backend.exists(gp.path)):
1969 raise PatchError(_("cannot create %s: destination "
1969 raise PatchError(_("cannot create %s: destination "
1970 "already exists") % gp.path)
1970 "already exists") % gp.path)
1971 backend.setfile(gp.path, data, mode, gp.oldpath)
1971 backend.setfile(gp.path, data, mode, gp.oldpath)
1972 continue
1972 continue
1973 try:
1973 try:
1974 current_file = patcher(ui, gp, backend, store,
1974 current_file = patcher(ui, gp, backend, store,
1975 eolmode=eolmode)
1975 eolmode=eolmode)
1976 except PatchError as inst:
1976 except PatchError as inst:
1977 ui.warn(str(inst) + '\n')
1977 ui.warn(str(inst) + '\n')
1978 current_file = None
1978 current_file = None
1979 rejects += 1
1979 rejects += 1
1980 continue
1980 continue
1981 elif state == 'git':
1981 elif state == 'git':
1982 for gp in values:
1982 for gp in values:
1983 path = pstrip(gp.oldpath)
1983 path = pstrip(gp.oldpath)
1984 data, mode = backend.getfile(path)
1984 data, mode = backend.getfile(path)
1985 if data is None:
1985 if data is None:
1986 # The error ignored here will trigger a getfile()
1986 # The error ignored here will trigger a getfile()
1987 # error in a place more appropriate for error
1987 # error in a place more appropriate for error
1988 # handling, and will not interrupt the patching
1988 # handling, and will not interrupt the patching
1989 # process.
1989 # process.
1990 pass
1990 pass
1991 else:
1991 else:
1992 store.setfile(path, data, mode)
1992 store.setfile(path, data, mode)
1993 else:
1993 else:
1994 raise error.Abort(_('unsupported parser state: %s') % state)
1994 raise error.Abort(_('unsupported parser state: %s') % state)
1995
1995
1996 if current_file:
1996 if current_file:
1997 rejects += current_file.close()
1997 rejects += current_file.close()
1998
1998
1999 if rejects:
1999 if rejects:
2000 return -1
2000 return -1
2001 return err
2001 return err
2002
2002
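The state machine above is the heart of the built-in patcher: it consumes the (state, values) pairs that iterhunks() produces ('file', 'hunk' and 'git'). A minimal, hedged sketch of walking that stream without applying anything — dumpstates is an illustrative name, not part of patch.py:

    # Illustrative sketch: inspect the parser states that the loop above consumes.
    from mercurial import patch as patchmod

    def dumpstates(patchpath):
        with open(patchpath, 'rb') as fp:
            for state, values in patchmod.iterhunks(fp):
                if state == 'file':
                    afile, bfile, first_hunk, gp = values
                    print('file: %s -> %s' % (afile, bfile))
                elif state == 'git':
                    # a git metadata block carries one patchmeta per file
                    print('git: %s' % ', '.join(gp.path for gp in values))
                else:
                    print(state)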
2003 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2003 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2004 similarity):
2004 similarity):
2005 """use <patcher> to apply <patchname> to the working directory.
2005 """use <patcher> to apply <patchname> to the working directory.
2006 returns whether patch was applied with fuzz factor."""
2006 returns whether patch was applied with fuzz factor."""
2007
2007
2008 fuzz = False
2008 fuzz = False
2009 args = []
2009 args = []
2010 cwd = repo.root
2010 cwd = repo.root
2011 if cwd:
2011 if cwd:
2012 args.append('-d %s' % util.shellquote(cwd))
2012 args.append('-d %s' % util.shellquote(cwd))
2013 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2013 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2014 util.shellquote(patchname)))
2014 util.shellquote(patchname)))
2015 try:
2015 try:
2016 for line in util.iterfile(fp):
2016 for line in util.iterfile(fp):
2017 line = line.rstrip()
2017 line = line.rstrip()
2018 ui.note(line + '\n')
2018 ui.note(line + '\n')
2019 if line.startswith('patching file '):
2019 if line.startswith('patching file '):
2020 pf = util.parsepatchoutput(line)
2020 pf = util.parsepatchoutput(line)
2021 printed_file = False
2021 printed_file = False
2022 files.add(pf)
2022 files.add(pf)
2023 elif line.find('with fuzz') >= 0:
2023 elif line.find('with fuzz') >= 0:
2024 fuzz = True
2024 fuzz = True
2025 if not printed_file:
2025 if not printed_file:
2026 ui.warn(pf + '\n')
2026 ui.warn(pf + '\n')
2027 printed_file = True
2027 printed_file = True
2028 ui.warn(line + '\n')
2028 ui.warn(line + '\n')
2029 elif line.find('saving rejects to file') >= 0:
2029 elif line.find('saving rejects to file') >= 0:
2030 ui.warn(line + '\n')
2030 ui.warn(line + '\n')
2031 elif line.find('FAILED') >= 0:
2031 elif line.find('FAILED') >= 0:
2032 if not printed_file:
2032 if not printed_file:
2033 ui.warn(pf + '\n')
2033 ui.warn(pf + '\n')
2034 printed_file = True
2034 printed_file = True
2035 ui.warn(line + '\n')
2035 ui.warn(line + '\n')
2036 finally:
2036 finally:
2037 if files:
2037 if files:
2038 scmutil.marktouched(repo, files, similarity)
2038 scmutil.marktouched(repo, files, similarity)
2039 code = fp.close()
2039 code = fp.close()
2040 if code:
2040 if code:
2041 raise PatchError(_("patch command failed: %s") %
2041 raise PatchError(_("patch command failed: %s") %
2042 util.explainexit(code)[0])
2042 util.explainexit(code)[0])
2043 return fuzz
2043 return fuzz
2044
2044
2045 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2045 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2046 eolmode='strict'):
2046 eolmode='strict'):
2047 if files is None:
2047 if files is None:
2048 files = set()
2048 files = set()
2049 if eolmode is None:
2049 if eolmode is None:
2050 eolmode = ui.config('patch', 'eol', 'strict')
2050 eolmode = ui.config('patch', 'eol', 'strict')
2051 if eolmode.lower() not in eolmodes:
2051 if eolmode.lower() not in eolmodes:
2052 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2052 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2053 eolmode = eolmode.lower()
2053 eolmode = eolmode.lower()
2054
2054
2055 store = filestore()
2055 store = filestore()
2056 try:
2056 try:
2057 fp = open(patchobj, 'rb')
2057 fp = open(patchobj, 'rb')
2058 except TypeError:
2058 except TypeError:
2059 fp = patchobj
2059 fp = patchobj
2060 try:
2060 try:
2061 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2061 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2062 eolmode=eolmode)
2062 eolmode=eolmode)
2063 finally:
2063 finally:
2064 if fp != patchobj:
2064 if fp != patchobj:
2065 fp.close()
2065 fp.close()
2066 files.update(backend.close())
2066 files.update(backend.close())
2067 store.close()
2067 store.close()
2068 if ret < 0:
2068 if ret < 0:
2069 raise PatchError(_('patch failed to apply'))
2069 raise PatchError(_('patch failed to apply'))
2070 return ret > 0
2070 return ret > 0
2071
2071
2072 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2072 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2073 eolmode='strict', similarity=0):
2073 eolmode='strict', similarity=0):
2074 """use builtin patch to apply <patchobj> to the working directory.
2074 """use builtin patch to apply <patchobj> to the working directory.
2075 returns whether patch was applied with fuzz factor."""
2075 returns whether patch was applied with fuzz factor."""
2076 backend = workingbackend(ui, repo, similarity)
2076 backend = workingbackend(ui, repo, similarity)
2077 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2078
2078
2079 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2079 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2080 eolmode='strict'):
2080 eolmode='strict'):
2081 backend = repobackend(ui, repo, ctx, store)
2081 backend = repobackend(ui, repo, ctx, store)
2082 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2082 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2083
2083
2084 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2084 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2085 similarity=0):
2085 similarity=0):
2086 """Apply <patchname> to the working directory.
2086 """Apply <patchname> to the working directory.
2087
2087
2088 'eolmode' specifies how end of lines should be handled. It can be:
2088 'eolmode' specifies how end of lines should be handled. It can be:
2089 - 'strict': inputs are read in binary mode, EOLs are preserved
2089 - 'strict': inputs are read in binary mode, EOLs are preserved
2090 - 'crlf': EOLs are ignored when patching and reset to CRLF
2090 - 'crlf': EOLs are ignored when patching and reset to CRLF
2091 - 'lf': EOLs are ignored when patching and reset to LF
2091 - 'lf': EOLs are ignored when patching and reset to LF
2092 - None: get it from user settings, default to 'strict'
2092 - None: get it from user settings, default to 'strict'
2093 'eolmode' is ignored when using an external patcher program.
2093 'eolmode' is ignored when using an external patcher program.
2094
2094
2095 Returns whether patch was applied with fuzz factor.
2095 Returns whether patch was applied with fuzz factor.
2096 """
2096 """
2097 patcher = ui.config('ui', 'patch')
2097 patcher = ui.config('ui', 'patch')
2098 if files is None:
2098 if files is None:
2099 files = set()
2099 files = set()
2100 if patcher:
2100 if patcher:
2101 return _externalpatch(ui, repo, patcher, patchname, strip,
2101 return _externalpatch(ui, repo, patcher, patchname, strip,
2102 files, similarity)
2102 files, similarity)
2103 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2103 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2104 similarity)
2104 similarity)
2105
2105
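patch() is the public entry point; it dispatches to _externalpatch() when ui.patch is set and to internalpatch() otherwise. A hedged usage sketch, assuming the 'ui' and 'repo' objects come from the caller (for instance an extension command); applytoworkingdir is an illustrative name:

    # Illustrative sketch: apply a patch file to the working directory and
    # collect the touched files.
    from mercurial import patch as patchmod

    def applytoworkingdir(ui, repo, patchname):
        files = set()
        # strip=1 mirrors 'patch -p1'; eolmode=None defers to the patch.eol config
        fuzz = patchmod.patch(ui, repo, patchname, strip=1, files=files,
                              eolmode=None)
        if fuzz:
            ui.warn('patch applied with fuzz\n')
        return files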
2106 def changedfiles(ui, repo, patchpath, strip=1):
2106 def changedfiles(ui, repo, patchpath, strip=1):
2107 backend = fsbackend(ui, repo.root)
2107 backend = fsbackend(ui, repo.root)
2108 with open(patchpath, 'rb') as fp:
2108 with open(patchpath, 'rb') as fp:
2109 changed = set()
2109 changed = set()
2110 for state, values in iterhunks(fp):
2110 for state, values in iterhunks(fp):
2111 if state == 'file':
2111 if state == 'file':
2112 afile, bfile, first_hunk, gp = values
2112 afile, bfile, first_hunk, gp = values
2113 if gp:
2113 if gp:
2114 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2114 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2115 if gp.oldpath:
2115 if gp.oldpath:
2116 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2116 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2117 else:
2117 else:
2118 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2118 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2119 '')
2119 '')
2120 changed.add(gp.path)
2120 changed.add(gp.path)
2121 if gp.op == 'RENAME':
2121 if gp.op == 'RENAME':
2122 changed.add(gp.oldpath)
2122 changed.add(gp.oldpath)
2123 elif state not in ('hunk', 'git'):
2123 elif state not in ('hunk', 'git'):
2124 raise error.Abort(_('unsupported parser state: %s') % state)
2124 raise error.Abort(_('unsupported parser state: %s') % state)
2125 return changed
2125 return changed
2126
2126
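changedfiles() makes it possible to preview which paths a patch would touch before applying it. A hedged sketch along the same lines (previewpatch is an illustrative name):

    # Illustrative sketch: list the paths a patch would modify without applying it.
    from mercurial import patch as patchmod

    def previewpatch(ui, repo, patchpath):
        changed = patchmod.changedfiles(ui, repo, patchpath, strip=1)
        for f in sorted(changed):
            ui.write('%s\n' % f)
        return changed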
2127 class GitDiffRequired(Exception):
2127 class GitDiffRequired(Exception):
2128 pass
2128 pass
2129
2129
2130 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2130 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2131 '''return diffopts with all features supported and parsed'''
2131 '''return diffopts with all features supported and parsed'''
2132 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2132 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2133 git=True, whitespace=True, formatchanging=True)
2133 git=True, whitespace=True, formatchanging=True)
2134
2134
2135 diffopts = diffallopts
2135 diffopts = diffallopts
2136
2136
2137 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2137 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2138 whitespace=False, formatchanging=False):
2138 whitespace=False, formatchanging=False):
2139 '''return diffopts with only opted-in features parsed
2139 '''return diffopts with only opted-in features parsed
2140
2140
2141 Features:
2141 Features:
2142 - git: git-style diffs
2142 - git: git-style diffs
2143 - whitespace: whitespace options like ignoreblanklines and ignorews
2143 - whitespace: whitespace options like ignoreblanklines and ignorews
2144 - formatchanging: options that will likely break or cause correctness issues
2144 - formatchanging: options that will likely break or cause correctness issues
2145 with most diff parsers
2145 with most diff parsers
2146 '''
2146 '''
2147 def get(key, name=None, getter=ui.configbool, forceplain=None):
2147 def get(key, name=None, getter=ui.configbool, forceplain=None):
2148 if opts:
2148 if opts:
2149 v = opts.get(key)
2149 v = opts.get(key)
2150 # diffopts flags are either None-default (which is passed
2150 # diffopts flags are either None-default (which is passed
2151 # through unchanged, so we can identify unset values), or
2151 # through unchanged, so we can identify unset values), or
2152 # some other falsey default (eg --unified, which defaults
2152 # some other falsey default (eg --unified, which defaults
2153 # to an empty string). We only want to override the config
2153 # to an empty string). We only want to override the config
2154 # entries from hgrc with command line values if they
2154 # entries from hgrc with command line values if they
2155 # appear to have been set, which is any truthy value,
2155 # appear to have been set, which is any truthy value,
2156 # True, or False.
2156 # True, or False.
2157 if v or isinstance(v, bool):
2157 if v or isinstance(v, bool):
2158 return v
2158 return v
2159 if forceplain is not None and ui.plain():
2159 if forceplain is not None and ui.plain():
2160 return forceplain
2160 return forceplain
2161 return getter(section, name or key, None, untrusted=untrusted)
2161 return getter(section, name or key, None, untrusted=untrusted)
2162
2162
2163 # core options, expected to be understood by every diff parser
2163 # core options, expected to be understood by every diff parser
2164 buildopts = {
2164 buildopts = {
2165 'nodates': get('nodates'),
2165 'nodates': get('nodates'),
2166 'showfunc': get('show_function', 'showfunc'),
2166 'showfunc': get('show_function', 'showfunc'),
2167 'context': get('unified', getter=ui.config),
2167 'context': get('unified', getter=ui.config),
2168 }
2168 }
2169
2169
2170 if git:
2170 if git:
2171 buildopts['git'] = get('git')
2171 buildopts['git'] = get('git')
2172
2172
2173 # since this is in the experimental section, we need to call
2173 # since this is in the experimental section, we need to call
2174 # ui.configbool directly
2174 # ui.configbool directly
2175 buildopts['showsimilarity'] = ui.configbool('experimental',
2175 buildopts['showsimilarity'] = ui.configbool('experimental',
2176 'extendedheader.similarity')
2176 'extendedheader.similarity')
2177
2177
2178 # need to inspect the ui object instead of using get() since we want to
2178 # need to inspect the ui object instead of using get() since we want to
2179 # test for an int
2179 # test for an int
2180 hconf = ui.config('experimental', 'extendedheader.index')
2180 hconf = ui.config('experimental', 'extendedheader.index')
2181 if hconf is not None:
2181 if hconf is not None:
2182 hlen = None
2182 hlen = None
2183 try:
2183 try:
2184 # the hash config could be an integer (for length of hash) or a
2184 # the hash config could be an integer (for length of hash) or a
2185 # word (e.g. short, full, none)
2185 # word (e.g. short, full, none)
2186 hlen = int(hconf)
2186 hlen = int(hconf)
2187 if hlen < 0 or hlen > 40:
2188 msg = _("invalid length for extendedheader.index: '%d'\n")
2189 ui.warn(msg % hlen)
2187 except ValueError:
2190 except ValueError:
2188 # default value
2191 # default value
2189 if hconf == 'short' or hconf == '':
2192 if hconf == 'short' or hconf == '':
2190 hlen = 12
2193 hlen = 12
2191 elif hconf == 'full':
2194 elif hconf == 'full':
2192 hlen = 40
2195 hlen = 40
2193 elif hconf != 'none':
2196 elif hconf != 'none':
2194 msg = _("invalid value for extendedheader.index: '%s'\n")
2197 msg = _("invalid value for extendedheader.index: '%s'\n")
2195 ui.warn(msg % hconf)
2198 ui.warn(msg % hconf)
2196 finally:
2199 finally:
2197 if hlen < 0 or hlen > 40:
2198 msg = _("invalid length for extendedheader.index: '%d'\n")
2199 ui.warn(msg % hlen)
2200 buildopts['index'] = hlen
2200 buildopts['index'] = hlen
2201
2201
2202 if whitespace:
2202 if whitespace:
2203 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2203 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2204 buildopts['ignorewsamount'] = get('ignore_space_change',
2204 buildopts['ignorewsamount'] = get('ignore_space_change',
2205 'ignorewsamount')
2205 'ignorewsamount')
2206 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2206 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2207 'ignoreblanklines')
2207 'ignoreblanklines')
2208 if formatchanging:
2208 if formatchanging:
2209 buildopts['text'] = opts and opts.get('text')
2209 buildopts['text'] = opts and opts.get('text')
2210 buildopts['nobinary'] = get('nobinary', forceplain=False)
2210 buildopts['nobinary'] = get('nobinary', forceplain=False)
2211 buildopts['noprefix'] = get('noprefix', forceplain=False)
2211 buildopts['noprefix'] = get('noprefix', forceplain=False)
2212
2212
2213 return mdiff.diffopts(**buildopts)
2213 return mdiff.diffopts(**buildopts)
2214
2214
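This revision's change is visible above: the 0..40 bounds check on the index length now lives inside the try block, so it only runs once int(hconf) has succeeded, and word values such as 'short', 'full' or 'none' no longer reach a comparison against None in the finally clause. A standalone restatement of the same parsing rule, for reference only — parseindexlength and the warn callback are illustrative, not part of patch.py:

    # Illustrative restatement of the extendedheader.index handling above.
    def parseindexlength(hconf, warn):
        """Map an experimental.extendedheader.index value to a hash length."""
        hlen = None
        try:
            hlen = int(hconf)
            # the length check only makes sense once we know it is an integer
            if hlen < 0 or hlen > 40:
                warn("invalid length for extendedheader.index: '%d'\n" % hlen)
        except ValueError:
            if hconf in ('short', ''):
                hlen = 12
            elif hconf == 'full':
                hlen = 40
            elif hconf != 'none':
                warn("invalid value for extendedheader.index: '%s'\n" % hconf)
        return hlen

    # e.g. '7' -> 7, 'short' or '' -> 12, 'full' -> 40, 'none' -> None;
    # '-1' -> -1 plus a warning, and trydiff() then skips the header because
    # opts.index > 0 is false (see the new test case added below).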
2215 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2215 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2216 losedatafn=None, prefix='', relroot='', copy=None):
2216 losedatafn=None, prefix='', relroot='', copy=None):
2217 '''yields diff of changes to files between two nodes, or node and
2217 '''yields diff of changes to files between two nodes, or node and
2218 working directory.
2218 working directory.
2219
2219
2220 if node1 is None, use first dirstate parent instead.
2220 if node1 is None, use first dirstate parent instead.
2221 if node2 is None, compare node1 with working directory.
2221 if node2 is None, compare node1 with working directory.
2222
2222
2223 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2223 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2224 every time some change cannot be represented with the current
2224 every time some change cannot be represented with the current
2225 patch format. Return False to upgrade to git patch format, True to
2225 patch format. Return False to upgrade to git patch format, True to
2226 accept the loss or raise an exception to abort the diff. It is
2226 accept the loss or raise an exception to abort the diff. It is
2227 called with the name of current file being diffed as 'fn'. If set
2227 called with the name of the current file being diffed as 'fn'. If set
2227 called with the name of the current file being diffed as 'fn'. If set
2228 to None, patches will always be upgraded to git format when
2229 necessary.
2229 necessary.
2230
2230
2231 prefix is a filename prefix that is prepended to all filenames on
2231 prefix is a filename prefix that is prepended to all filenames on
2232 display (used for subrepos).
2232 display (used for subrepos).
2233
2233
2234 relroot, if not empty, must be normalized with a trailing /. Any match
2234 relroot, if not empty, must be normalized with a trailing /. Any match
2235 patterns that fall outside it will be ignored.
2235 patterns that fall outside it will be ignored.
2236
2236
2237 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2237 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2238 information.'''
2238 information.'''
2239
2239
2240 if opts is None:
2240 if opts is None:
2241 opts = mdiff.defaultopts
2241 opts = mdiff.defaultopts
2242
2242
2243 if not node1 and not node2:
2243 if not node1 and not node2:
2244 node1 = repo.dirstate.p1()
2244 node1 = repo.dirstate.p1()
2245
2245
2246 def lrugetfilectx():
2246 def lrugetfilectx():
2247 cache = {}
2247 cache = {}
2248 order = collections.deque()
2248 order = collections.deque()
2249 def getfilectx(f, ctx):
2249 def getfilectx(f, ctx):
2250 fctx = ctx.filectx(f, filelog=cache.get(f))
2250 fctx = ctx.filectx(f, filelog=cache.get(f))
2251 if f not in cache:
2251 if f not in cache:
2252 if len(cache) > 20:
2252 if len(cache) > 20:
2253 del cache[order.popleft()]
2253 del cache[order.popleft()]
2254 cache[f] = fctx.filelog()
2254 cache[f] = fctx.filelog()
2255 else:
2255 else:
2256 order.remove(f)
2256 order.remove(f)
2257 order.append(f)
2257 order.append(f)
2258 return fctx
2258 return fctx
2259 return getfilectx
2259 return getfilectx
2260 getfilectx = lrugetfilectx()
2260 getfilectx = lrugetfilectx()
2261
2261
2262 ctx1 = repo[node1]
2262 ctx1 = repo[node1]
2263 ctx2 = repo[node2]
2263 ctx2 = repo[node2]
2264
2264
2265 relfiltered = False
2265 relfiltered = False
2266 if relroot != '' and match.always():
2266 if relroot != '' and match.always():
2267 # as a special case, create a new matcher with just the relroot
2267 # as a special case, create a new matcher with just the relroot
2268 pats = [relroot]
2268 pats = [relroot]
2269 match = scmutil.match(ctx2, pats, default='path')
2269 match = scmutil.match(ctx2, pats, default='path')
2270 relfiltered = True
2270 relfiltered = True
2271
2271
2272 if not changes:
2272 if not changes:
2273 changes = repo.status(ctx1, ctx2, match=match)
2273 changes = repo.status(ctx1, ctx2, match=match)
2274 modified, added, removed = changes[:3]
2274 modified, added, removed = changes[:3]
2275
2275
2276 if not modified and not added and not removed:
2276 if not modified and not added and not removed:
2277 return []
2277 return []
2278
2278
2279 if repo.ui.debugflag:
2279 if repo.ui.debugflag:
2280 hexfunc = hex
2280 hexfunc = hex
2281 else:
2281 else:
2282 hexfunc = short
2282 hexfunc = short
2283 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2283 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2284
2284
2285 if copy is None:
2285 if copy is None:
2286 copy = {}
2286 copy = {}
2287 if opts.git or opts.upgrade:
2287 if opts.git or opts.upgrade:
2288 copy = copies.pathcopies(ctx1, ctx2, match=match)
2288 copy = copies.pathcopies(ctx1, ctx2, match=match)
2289
2289
2290 if relroot is not None:
2290 if relroot is not None:
2291 if not relfiltered:
2291 if not relfiltered:
2292 # XXX this would ideally be done in the matcher, but that is
2292 # XXX this would ideally be done in the matcher, but that is
2293 # generally meant to 'or' patterns, not 'and' them. In this case we
2293 # generally meant to 'or' patterns, not 'and' them. In this case we
2294 # need to 'and' all the patterns from the matcher with relroot.
2294 # need to 'and' all the patterns from the matcher with relroot.
2295 def filterrel(l):
2295 def filterrel(l):
2296 return [f for f in l if f.startswith(relroot)]
2296 return [f for f in l if f.startswith(relroot)]
2297 modified = filterrel(modified)
2297 modified = filterrel(modified)
2298 added = filterrel(added)
2298 added = filterrel(added)
2299 removed = filterrel(removed)
2299 removed = filterrel(removed)
2300 relfiltered = True
2300 relfiltered = True
2301 # filter out copies where either side isn't inside the relative root
2301 # filter out copies where either side isn't inside the relative root
2302 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2302 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2303 if dst.startswith(relroot)
2303 if dst.startswith(relroot)
2304 and src.startswith(relroot)))
2304 and src.startswith(relroot)))
2305
2305
2306 modifiedset = set(modified)
2306 modifiedset = set(modified)
2307 addedset = set(added)
2307 addedset = set(added)
2308 removedset = set(removed)
2308 removedset = set(removed)
2309 for f in modified:
2309 for f in modified:
2310 if f not in ctx1:
2310 if f not in ctx1:
2311 # Fix up added, since merged-in additions appear as
2311 # Fix up added, since merged-in additions appear as
2312 # modifications during merges
2312 # modifications during merges
2313 modifiedset.remove(f)
2313 modifiedset.remove(f)
2314 addedset.add(f)
2314 addedset.add(f)
2315 for f in removed:
2315 for f in removed:
2316 if f not in ctx1:
2316 if f not in ctx1:
2317 # Merged-in additions that are then removed are reported as removed.
2317 # Merged-in additions that are then removed are reported as removed.
2318 # They are not in ctx1, so we don't want to show them in the diff.
2318 # They are not in ctx1, so we don't want to show them in the diff.
2319 removedset.remove(f)
2319 removedset.remove(f)
2320 modified = sorted(modifiedset)
2320 modified = sorted(modifiedset)
2321 added = sorted(addedset)
2321 added = sorted(addedset)
2322 removed = sorted(removedset)
2322 removed = sorted(removedset)
2323 for dst, src in copy.items():
2323 for dst, src in copy.items():
2324 if src not in ctx1:
2324 if src not in ctx1:
2325 # Files merged in during a merge and then copied/renamed are
2325 # Files merged in during a merge and then copied/renamed are
2326 # reported as copies. We want to show them in the diff as additions.
2326 # reported as copies. We want to show them in the diff as additions.
2327 del copy[dst]
2327 del copy[dst]
2328
2328
2329 def difffn(opts, losedata):
2329 def difffn(opts, losedata):
2330 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2330 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2331 copy, getfilectx, opts, losedata, prefix, relroot)
2331 copy, getfilectx, opts, losedata, prefix, relroot)
2332 if opts.upgrade and not opts.git:
2332 if opts.upgrade and not opts.git:
2333 try:
2333 try:
2334 def losedata(fn):
2334 def losedata(fn):
2335 if not losedatafn or not losedatafn(fn=fn):
2335 if not losedatafn or not losedatafn(fn=fn):
2336 raise GitDiffRequired
2336 raise GitDiffRequired
2337 # Buffer the whole output until we are sure it can be generated
2337 # Buffer the whole output until we are sure it can be generated
2338 return list(difffn(opts.copy(git=False), losedata))
2338 return list(difffn(opts.copy(git=False), losedata))
2339 except GitDiffRequired:
2339 except GitDiffRequired:
2340 return difffn(opts.copy(git=True), None)
2340 return difffn(opts.copy(git=True), None)
2341 else:
2341 else:
2342 return difffn(opts, None)
2342 return difffn(opts, None)
2343
2343
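diff() is a generator of text blocks. A hedged sketch that writes a git-style diff of the working directory against its first parent, assuming 'ui' and 'repo' are obtained elsewhere (printworkingdiff is an illustrative name):

    # Illustrative sketch: emit a git-style diff of the working directory.
    from mercurial import patch as patchmod

    def printworkingdiff(ui, repo):
        opts = patchmod.diffallopts(ui, {'git': True})
        # node1=None -> first dirstate parent, node2=None -> working directory
        for chunk in patchmod.diff(repo, opts=opts):
            ui.write(chunk)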
2344 def difflabel(func, *args, **kw):
2344 def difflabel(func, *args, **kw):
2345 '''yields 2-tuples of (output, label) based on the output of func()'''
2345 '''yields 2-tuples of (output, label) based on the output of func()'''
2346 headprefixes = [('diff', 'diff.diffline'),
2346 headprefixes = [('diff', 'diff.diffline'),
2347 ('copy', 'diff.extended'),
2347 ('copy', 'diff.extended'),
2348 ('rename', 'diff.extended'),
2348 ('rename', 'diff.extended'),
2349 ('old', 'diff.extended'),
2349 ('old', 'diff.extended'),
2350 ('new', 'diff.extended'),
2350 ('new', 'diff.extended'),
2351 ('deleted', 'diff.extended'),
2351 ('deleted', 'diff.extended'),
2352 ('index', 'diff.extended'),
2352 ('index', 'diff.extended'),
2353 ('similarity', 'diff.extended'),
2353 ('similarity', 'diff.extended'),
2354 ('---', 'diff.file_a'),
2354 ('---', 'diff.file_a'),
2355 ('+++', 'diff.file_b')]
2355 ('+++', 'diff.file_b')]
2356 textprefixes = [('@', 'diff.hunk'),
2356 textprefixes = [('@', 'diff.hunk'),
2357 ('-', 'diff.deleted'),
2357 ('-', 'diff.deleted'),
2358 ('+', 'diff.inserted')]
2358 ('+', 'diff.inserted')]
2359 head = False
2359 head = False
2360 for chunk in func(*args, **kw):
2360 for chunk in func(*args, **kw):
2361 lines = chunk.split('\n')
2361 lines = chunk.split('\n')
2362 for i, line in enumerate(lines):
2362 for i, line in enumerate(lines):
2363 if i != 0:
2363 if i != 0:
2364 yield ('\n', '')
2364 yield ('\n', '')
2365 if head:
2365 if head:
2366 if line.startswith('@'):
2366 if line.startswith('@'):
2367 head = False
2367 head = False
2368 else:
2368 else:
2369 if line and line[0] not in ' +-@\\':
2369 if line and line[0] not in ' +-@\\':
2370 head = True
2370 head = True
2371 stripline = line
2371 stripline = line
2372 diffline = False
2372 diffline = False
2373 if not head and line and line[0] in '+-':
2373 if not head and line and line[0] in '+-':
2374 # highlight tabs and trailing whitespace, but only in
2374 # highlight tabs and trailing whitespace, but only in
2375 # changed lines
2375 # changed lines
2376 stripline = line.rstrip()
2376 stripline = line.rstrip()
2377 diffline = True
2377 diffline = True
2378
2378
2379 prefixes = textprefixes
2379 prefixes = textprefixes
2380 if head:
2380 if head:
2381 prefixes = headprefixes
2381 prefixes = headprefixes
2382 for prefix, label in prefixes:
2382 for prefix, label in prefixes:
2383 if stripline.startswith(prefix):
2383 if stripline.startswith(prefix):
2384 if diffline:
2384 if diffline:
2385 for token in tabsplitter.findall(stripline):
2385 for token in tabsplitter.findall(stripline):
2386 if '\t' == token[0]:
2386 if '\t' == token[0]:
2387 yield (token, 'diff.tab')
2387 yield (token, 'diff.tab')
2388 else:
2388 else:
2389 yield (token, label)
2389 yield (token, label)
2390 else:
2390 else:
2391 yield (stripline, label)
2391 yield (stripline, label)
2392 break
2392 break
2393 else:
2393 else:
2394 yield (line, '')
2394 yield (line, '')
2395 if line != stripline:
2395 if line != stripline:
2396 yield (line[len(stripline):], 'diff.trailingwhitespace')
2396 yield (line[len(stripline):], 'diff.trailingwhitespace')
2397
2397
2398 def diffui(*args, **kw):
2398 def diffui(*args, **kw):
2399 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2399 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2400 return difflabel(diff, *args, **kw)
2400 return difflabel(diff, *args, **kw)
2401
2401
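difflabel()/diffui() yield (output, label) pairs meant for ui.write(), which accepts a label keyword for colorized output. A hedged sketch (printlabeleddiff is an illustrative name):

    # Illustrative sketch: write a labeled (colorizable) diff of the working dir.
    from mercurial import patch as patchmod

    def printlabeleddiff(ui, repo):
        opts = patchmod.diffallopts(ui, {'git': True})
        for output, label in patchmod.diffui(repo, opts=opts):
            ui.write(output, label=label)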
2402 def _filepairs(modified, added, removed, copy, opts):
2402 def _filepairs(modified, added, removed, copy, opts):
2403 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2403 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2404 before and f2 is the the name after. For added files, f1 will be None,
2404 before and f2 is the the name after. For added files, f1 will be None,
2405 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2405 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2406 or 'rename' (the latter two only if opts.git is set).'''
2406 or 'rename' (the latter two only if opts.git is set).'''
2407 gone = set()
2407 gone = set()
2408
2408
2409 copyto = dict([(v, k) for k, v in copy.items()])
2409 copyto = dict([(v, k) for k, v in copy.items()])
2410
2410
2411 addedset, removedset = set(added), set(removed)
2411 addedset, removedset = set(added), set(removed)
2412
2412
2413 for f in sorted(modified + added + removed):
2413 for f in sorted(modified + added + removed):
2414 copyop = None
2414 copyop = None
2415 f1, f2 = f, f
2415 f1, f2 = f, f
2416 if f in addedset:
2416 if f in addedset:
2417 f1 = None
2417 f1 = None
2418 if f in copy:
2418 if f in copy:
2419 if opts.git:
2419 if opts.git:
2420 f1 = copy[f]
2420 f1 = copy[f]
2421 if f1 in removedset and f1 not in gone:
2421 if f1 in removedset and f1 not in gone:
2422 copyop = 'rename'
2422 copyop = 'rename'
2423 gone.add(f1)
2423 gone.add(f1)
2424 else:
2424 else:
2425 copyop = 'copy'
2425 copyop = 'copy'
2426 elif f in removedset:
2426 elif f in removedset:
2427 f2 = None
2427 f2 = None
2428 if opts.git:
2428 if opts.git:
2429 # have we already reported a copy above?
2429 # have we already reported a copy above?
2430 if (f in copyto and copyto[f] in addedset
2430 if (f in copyto and copyto[f] in addedset
2431 and copy[copyto[f]] == f):
2431 and copy[copyto[f]] == f):
2432 continue
2432 continue
2433 yield f1, f2, copyop
2433 yield f1, f2, copyop
2434
2434
2435 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2435 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2436 copy, getfilectx, opts, losedatafn, prefix, relroot):
2436 copy, getfilectx, opts, losedatafn, prefix, relroot):
2437 '''given input data, generate a diff and yield it in blocks
2437 '''given input data, generate a diff and yield it in blocks
2438
2438
2439 If generating a diff would lose data like flags or binary data and
2439 If generating a diff would lose data like flags or binary data and
2440 losedatafn is not None, it will be called.
2440 losedatafn is not None, it will be called.
2441
2441
2442 relroot is removed and prefix is added to every path in the diff output.
2442 relroot is removed and prefix is added to every path in the diff output.
2443
2443
2444 If relroot is not empty, this function expects every path in modified,
2444 If relroot is not empty, this function expects every path in modified,
2445 added, removed and copy to start with it.'''
2445 added, removed and copy to start with it.'''
2446
2446
2447 def gitindex(text):
2447 def gitindex(text):
2448 if not text:
2448 if not text:
2449 text = ""
2449 text = ""
2450 l = len(text)
2450 l = len(text)
2451 s = hashlib.sha1('blob %d\0' % l)
2451 s = hashlib.sha1('blob %d\0' % l)
2452 s.update(text)
2452 s.update(text)
2453 return s.hexdigest()
2453 return s.hexdigest()
2454
2454
2455 if opts.noprefix:
2455 if opts.noprefix:
2456 aprefix = bprefix = ''
2456 aprefix = bprefix = ''
2457 else:
2457 else:
2458 aprefix = 'a/'
2458 aprefix = 'a/'
2459 bprefix = 'b/'
2459 bprefix = 'b/'
2460
2460
2461 def diffline(f, revs):
2461 def diffline(f, revs):
2462 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2462 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2463 return 'diff %s %s' % (revinfo, f)
2463 return 'diff %s %s' % (revinfo, f)
2464
2464
2465 date1 = util.datestr(ctx1.date())
2465 date1 = util.datestr(ctx1.date())
2466 date2 = util.datestr(ctx2.date())
2466 date2 = util.datestr(ctx2.date())
2467
2467
2468 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2468 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2469
2469
2470 if relroot != '' and (repo.ui.configbool('devel', 'all')
2470 if relroot != '' and (repo.ui.configbool('devel', 'all')
2471 or repo.ui.configbool('devel', 'check-relroot')):
2471 or repo.ui.configbool('devel', 'check-relroot')):
2472 for f in modified + added + removed + copy.keys() + copy.values():
2472 for f in modified + added + removed + copy.keys() + copy.values():
2473 if f is not None and not f.startswith(relroot):
2473 if f is not None and not f.startswith(relroot):
2474 raise AssertionError(
2474 raise AssertionError(
2475 "file %s doesn't start with relroot %s" % (f, relroot))
2475 "file %s doesn't start with relroot %s" % (f, relroot))
2476
2476
2477 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2477 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2478 content1 = None
2478 content1 = None
2479 content2 = None
2479 content2 = None
2480 flag1 = None
2480 flag1 = None
2481 flag2 = None
2481 flag2 = None
2482 if f1:
2482 if f1:
2483 content1 = getfilectx(f1, ctx1).data()
2483 content1 = getfilectx(f1, ctx1).data()
2484 if opts.git or losedatafn:
2484 if opts.git or losedatafn:
2485 flag1 = ctx1.flags(f1)
2485 flag1 = ctx1.flags(f1)
2486 if f2:
2486 if f2:
2487 content2 = getfilectx(f2, ctx2).data()
2487 content2 = getfilectx(f2, ctx2).data()
2488 if opts.git or losedatafn:
2488 if opts.git or losedatafn:
2489 flag2 = ctx2.flags(f2)
2489 flag2 = ctx2.flags(f2)
2490 binary = False
2490 binary = False
2491 if opts.git or losedatafn:
2491 if opts.git or losedatafn:
2492 binary = util.binary(content1) or util.binary(content2)
2492 binary = util.binary(content1) or util.binary(content2)
2493
2493
2494 if losedatafn and not opts.git:
2494 if losedatafn and not opts.git:
2495 if (binary or
2495 if (binary or
2496 # copy/rename
2496 # copy/rename
2497 f2 in copy or
2497 f2 in copy or
2498 # empty file creation
2498 # empty file creation
2499 (not f1 and not content2) or
2499 (not f1 and not content2) or
2500 # empty file deletion
2500 # empty file deletion
2501 (not content1 and not f2) or
2501 (not content1 and not f2) or
2502 # create with flags
2502 # create with flags
2503 (not f1 and flag2) or
2503 (not f1 and flag2) or
2504 # change flags
2504 # change flags
2505 (f1 and f2 and flag1 != flag2)):
2505 (f1 and f2 and flag1 != flag2)):
2506 losedatafn(f2 or f1)
2506 losedatafn(f2 or f1)
2507
2507
2508 path1 = f1 or f2
2508 path1 = f1 or f2
2509 path2 = f2 or f1
2509 path2 = f2 or f1
2510 path1 = posixpath.join(prefix, path1[len(relroot):])
2510 path1 = posixpath.join(prefix, path1[len(relroot):])
2511 path2 = posixpath.join(prefix, path2[len(relroot):])
2511 path2 = posixpath.join(prefix, path2[len(relroot):])
2512 header = []
2512 header = []
2513 if opts.git:
2513 if opts.git:
2514 header.append('diff --git %s%s %s%s' %
2514 header.append('diff --git %s%s %s%s' %
2515 (aprefix, path1, bprefix, path2))
2515 (aprefix, path1, bprefix, path2))
2516 if not f1: # added
2516 if not f1: # added
2517 header.append('new file mode %s' % gitmode[flag2])
2517 header.append('new file mode %s' % gitmode[flag2])
2518 elif not f2: # removed
2518 elif not f2: # removed
2519 header.append('deleted file mode %s' % gitmode[flag1])
2519 header.append('deleted file mode %s' % gitmode[flag1])
2520 else: # modified/copied/renamed
2520 else: # modified/copied/renamed
2521 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2521 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2522 if mode1 != mode2:
2522 if mode1 != mode2:
2523 header.append('old mode %s' % mode1)
2523 header.append('old mode %s' % mode1)
2524 header.append('new mode %s' % mode2)
2524 header.append('new mode %s' % mode2)
2525 if copyop is not None:
2525 if copyop is not None:
2526 if opts.showsimilarity:
2526 if opts.showsimilarity:
2527 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2527 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2528 header.append('similarity index %d%%' % sim)
2528 header.append('similarity index %d%%' % sim)
2529 header.append('%s from %s' % (copyop, path1))
2529 header.append('%s from %s' % (copyop, path1))
2530 header.append('%s to %s' % (copyop, path2))
2530 header.append('%s to %s' % (copyop, path2))
2531 elif revs and not repo.ui.quiet:
2531 elif revs and not repo.ui.quiet:
2532 header.append(diffline(path1, revs))
2532 header.append(diffline(path1, revs))
2533
2533
2534 if binary and opts.git and not opts.nobinary:
2534 if binary and opts.git and not opts.nobinary:
2535 text = mdiff.b85diff(content1, content2)
2535 text = mdiff.b85diff(content1, content2)
2536 if text:
2536 if text:
2537 header.append('index %s..%s' %
2537 header.append('index %s..%s' %
2538 (gitindex(content1), gitindex(content2)))
2538 (gitindex(content1), gitindex(content2)))
2539 else:
2539 else:
2540 if opts.git and opts.index > 0:
2540 if opts.git and opts.index > 0:
2541 flag = flag1
2541 flag = flag1
2542 if flag is None:
2542 if flag is None:
2543 flag = flag2
2543 flag = flag2
2544 header.append('index %s..%s %s' %
2544 header.append('index %s..%s %s' %
2545 (gitindex(content1)[0:opts.index],
2545 (gitindex(content1)[0:opts.index],
2546 gitindex(content2)[0:opts.index],
2546 gitindex(content2)[0:opts.index],
2547 gitmode[flag]))
2547 gitmode[flag]))
2548
2548
2549 text = mdiff.unidiff(content1, date1,
2549 text = mdiff.unidiff(content1, date1,
2550 content2, date2,
2550 content2, date2,
2551 path1, path2, opts=opts)
2551 path1, path2, opts=opts)
2552 if header and (text or len(header) > 1):
2552 if header and (text or len(header) > 1):
2553 yield '\n'.join(header) + '\n'
2553 yield '\n'.join(header) + '\n'
2554 if text:
2554 if text:
2555 yield text
2555 yield text
2556
2556
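gitindex() above hashes content exactly like a git blob object, and when experimental.extendedheader.index is a positive integer the digest is truncated to that many characters in the 'index <old>..<new> <mode>' header. A small standalone check of the hashing (Python 2, mirroring the code above; gitblobsha is an illustrative name):

    # Illustrative sketch: the same blob hashing used by gitindex() above.
    import hashlib

    def gitblobsha(text):
        # sha1('blob <len>\0' + content), i.e. git's blob object id
        if not text:
            text = ""
        s = hashlib.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    # git's well-known empty-blob id
    assert gitblobsha('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
    # matches the 7-character 'index 7898192..6178079 100644' header
    # exercised by the test below (f1 goes from 'a\n' to 'b\n')
    assert gitblobsha('a\n').startswith('7898192')
    assert gitblobsha('b\n').startswith('6178079')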
2557 def diffstatsum(stats):
2557 def diffstatsum(stats):
2558 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2558 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2559 for f, a, r, b in stats:
2559 for f, a, r, b in stats:
2560 maxfile = max(maxfile, encoding.colwidth(f))
2560 maxfile = max(maxfile, encoding.colwidth(f))
2561 maxtotal = max(maxtotal, a + r)
2561 maxtotal = max(maxtotal, a + r)
2562 addtotal += a
2562 addtotal += a
2563 removetotal += r
2563 removetotal += r
2564 binary = binary or b
2564 binary = binary or b
2565
2565
2566 return maxfile, maxtotal, addtotal, removetotal, binary
2566 return maxfile, maxtotal, addtotal, removetotal, binary
2567
2567
2568 def diffstatdata(lines):
2568 def diffstatdata(lines):
2569 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2569 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2570
2570
2571 results = []
2571 results = []
2572 filename, adds, removes, isbinary = None, 0, 0, False
2572 filename, adds, removes, isbinary = None, 0, 0, False
2573
2573
2574 def addresult():
2574 def addresult():
2575 if filename:
2575 if filename:
2576 results.append((filename, adds, removes, isbinary))
2576 results.append((filename, adds, removes, isbinary))
2577
2577
2578 for line in lines:
2578 for line in lines:
2579 if line.startswith('diff'):
2579 if line.startswith('diff'):
2580 addresult()
2580 addresult()
2581 # set numbers to 0 anyway when starting new file
2581 # set numbers to 0 anyway when starting new file
2582 adds, removes, isbinary = 0, 0, False
2582 adds, removes, isbinary = 0, 0, False
2583 if line.startswith('diff --git a/'):
2583 if line.startswith('diff --git a/'):
2584 filename = gitre.search(line).group(2)
2584 filename = gitre.search(line).group(2)
2585 elif line.startswith('diff -r'):
2585 elif line.startswith('diff -r'):
2586 # format: "diff -r ... -r ... filename"
2586 # format: "diff -r ... -r ... filename"
2587 filename = diffre.search(line).group(1)
2587 filename = diffre.search(line).group(1)
2588 elif line.startswith('+') and not line.startswith('+++ '):
2588 elif line.startswith('+') and not line.startswith('+++ '):
2589 adds += 1
2589 adds += 1
2590 elif line.startswith('-') and not line.startswith('--- '):
2590 elif line.startswith('-') and not line.startswith('--- '):
2591 removes += 1
2591 removes += 1
2592 elif (line.startswith('GIT binary patch') or
2592 elif (line.startswith('GIT binary patch') or
2593 line.startswith('Binary file')):
2593 line.startswith('Binary file')):
2594 isbinary = True
2594 isbinary = True
2595 addresult()
2595 addresult()
2596 return results
2596 return results
2597
2597
2598 def diffstat(lines, width=80):
2598 def diffstat(lines, width=80):
2599 output = []
2599 output = []
2600 stats = diffstatdata(lines)
2600 stats = diffstatdata(lines)
2601 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2601 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2602
2602
2603 countwidth = len(str(maxtotal))
2603 countwidth = len(str(maxtotal))
2604 if hasbinary and countwidth < 3:
2604 if hasbinary and countwidth < 3:
2605 countwidth = 3
2605 countwidth = 3
2606 graphwidth = width - countwidth - maxname - 6
2606 graphwidth = width - countwidth - maxname - 6
2607 if graphwidth < 10:
2607 if graphwidth < 10:
2608 graphwidth = 10
2608 graphwidth = 10
2609
2609
2610 def scale(i):
2610 def scale(i):
2611 if maxtotal <= graphwidth:
2611 if maxtotal <= graphwidth:
2612 return i
2612 return i
2613 # If diffstat runs out of room it doesn't print anything,
2613 # If diffstat runs out of room it doesn't print anything,
2614 # which isn't very useful, so always print at least one + or -
2614 # which isn't very useful, so always print at least one + or -
2615 # if there were at least some changes.
2615 # if there were at least some changes.
2616 return max(i * graphwidth // maxtotal, int(bool(i)))
2616 return max(i * graphwidth // maxtotal, int(bool(i)))
2617
2617
2618 for filename, adds, removes, isbinary in stats:
2618 for filename, adds, removes, isbinary in stats:
2619 if isbinary:
2619 if isbinary:
2620 count = 'Bin'
2620 count = 'Bin'
2621 else:
2621 else:
2622 count = adds + removes
2622 count = adds + removes
2623 pluses = '+' * scale(adds)
2623 pluses = '+' * scale(adds)
2624 minuses = '-' * scale(removes)
2624 minuses = '-' * scale(removes)
2625 output.append(' %s%s | %*s %s%s\n' %
2625 output.append(' %s%s | %*s %s%s\n' %
2626 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2626 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2627 countwidth, count, pluses, minuses))
2627 countwidth, count, pluses, minuses))
2628
2628
2629 if stats:
2629 if stats:
2630 output.append(_(' %d files changed, %d insertions(+), '
2630 output.append(_(' %d files changed, %d insertions(+), '
2631 '%d deletions(-)\n')
2631 '%d deletions(-)\n')
2632 % (len(stats), totaladds, totalremoves))
2632 % (len(stats), totaladds, totalremoves))
2633
2633
2634 return ''.join(output)
2634 return ''.join(output)
2635
2635
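diffstat() consumes an iterable of diff lines and returns the formatted table. A hedged sketch that feeds it the output of diff() above (printdiffstat is an illustrative name; ui.termwidth() is assumed available on the ui object):

    # Illustrative sketch: render a diffstat for the working-directory diff.
    from mercurial import patch as patchmod

    def printdiffstat(ui, repo):
        chunks = patchmod.diff(repo, opts=patchmod.diffallopts(ui))
        lines = ''.join(chunks).splitlines(True)
        ui.write(patchmod.diffstat(lines, width=ui.termwidth()))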
2636 def diffstatui(*args, **kw):
2636 def diffstatui(*args, **kw):
2637 '''like diffstat(), but yields 2-tuples of (output, label) for
2637 '''like diffstat(), but yields 2-tuples of (output, label) for
2638 ui.write()
2638 ui.write()
2639 '''
2639 '''
2640
2640
2641 for line in diffstat(*args, **kw).splitlines():
2641 for line in diffstat(*args, **kw).splitlines():
2642 if line and line[-1] in '+-':
2642 if line and line[-1] in '+-':
2643 name, graph = line.rsplit(' ', 1)
2643 name, graph = line.rsplit(' ', 1)
2644 yield (name + ' ', '')
2644 yield (name + ' ', '')
2645 m = re.search(r'\++', graph)
2645 m = re.search(r'\++', graph)
2646 if m:
2646 if m:
2647 yield (m.group(0), 'diffstat.inserted')
2647 yield (m.group(0), 'diffstat.inserted')
2648 m = re.search(r'-+', graph)
2648 m = re.search(r'-+', graph)
2649 if m:
2649 if m:
2650 yield (m.group(0), 'diffstat.deleted')
2650 yield (m.group(0), 'diffstat.deleted')
2651 else:
2651 else:
2652 yield (line, '')
2652 yield (line, '')
2653 yield ('\n', '')
2653 yield ('\n', '')
@@ -1,366 +1,388 b''
1 $ hg init repo
1 $ hg init repo
2 $ cd repo
2 $ cd repo
3 $ cat > a <<EOF
3 $ cat > a <<EOF
4 > c
4 > c
5 > c
5 > c
6 > a
6 > a
7 > a
7 > a
8 > b
8 > b
9 > a
9 > a
10 > a
10 > a
11 > c
11 > c
12 > c
12 > c
13 > EOF
13 > EOF
14 $ hg ci -Am adda
14 $ hg ci -Am adda
15 adding a
15 adding a
16
16
17 $ cat > a <<EOF
17 $ cat > a <<EOF
18 > c
18 > c
19 > c
19 > c
20 > a
20 > a
21 > a
21 > a
22 > dd
22 > dd
23 > a
23 > a
24 > a
24 > a
25 > c
25 > c
26 > c
26 > c
27 > EOF
27 > EOF
28
28
29 default context
29 default context
30
30
31 $ hg diff --nodates
31 $ hg diff --nodates
32 diff -r cf9f4ba66af2 a
32 diff -r cf9f4ba66af2 a
33 --- a/a
33 --- a/a
34 +++ b/a
34 +++ b/a
35 @@ -2,7 +2,7 @@
35 @@ -2,7 +2,7 @@
36 c
36 c
37 a
37 a
38 a
38 a
39 -b
39 -b
40 +dd
40 +dd
41 a
41 a
42 a
42 a
43 c
43 c
44
44
45 invalid --unified
45 invalid --unified
46
46
47 $ hg diff --nodates -U foo
47 $ hg diff --nodates -U foo
48 abort: diff context lines count must be an integer, not 'foo'
48 abort: diff context lines count must be an integer, not 'foo'
49 [255]
49 [255]
50
50
51
51
52 $ hg diff --nodates -U 2
52 $ hg diff --nodates -U 2
53 diff -r cf9f4ba66af2 a
53 diff -r cf9f4ba66af2 a
54 --- a/a
54 --- a/a
55 +++ b/a
55 +++ b/a
56 @@ -3,5 +3,5 @@
56 @@ -3,5 +3,5 @@
57 a
57 a
58 a
58 a
59 -b
59 -b
60 +dd
60 +dd
61 a
61 a
62 a
62 a
63
63
64 $ hg --config diff.unified=2 diff --nodates
64 $ hg --config diff.unified=2 diff --nodates
65 diff -r cf9f4ba66af2 a
65 diff -r cf9f4ba66af2 a
66 --- a/a
66 --- a/a
67 +++ b/a
67 +++ b/a
68 @@ -3,5 +3,5 @@
68 @@ -3,5 +3,5 @@
69 a
69 a
70 a
70 a
71 -b
71 -b
72 +dd
72 +dd
73 a
73 a
74 a
74 a
75
75
76 $ hg diff --nodates -U 1
76 $ hg diff --nodates -U 1
77 diff -r cf9f4ba66af2 a
77 diff -r cf9f4ba66af2 a
78 --- a/a
78 --- a/a
79 +++ b/a
79 +++ b/a
80 @@ -4,3 +4,3 @@
80 @@ -4,3 +4,3 @@
81 a
81 a
82 -b
82 -b
83 +dd
83 +dd
84 a
84 a
85
85
86 invalid diff.unified
86 invalid diff.unified
87
87
88 $ hg --config diff.unified=foo diff --nodates
88 $ hg --config diff.unified=foo diff --nodates
89 abort: diff context lines count must be an integer, not 'foo'
89 abort: diff context lines count must be an integer, not 'foo'
90 [255]
90 [255]
91
91
92 noprefix config and option
92 noprefix config and option
93
93
94 $ hg --config diff.noprefix=True diff --nodates
94 $ hg --config diff.noprefix=True diff --nodates
95 diff -r cf9f4ba66af2 a
95 diff -r cf9f4ba66af2 a
96 --- a
96 --- a
97 +++ a
97 +++ a
98 @@ -2,7 +2,7 @@
98 @@ -2,7 +2,7 @@
99 c
99 c
100 a
100 a
101 a
101 a
102 -b
102 -b
103 +dd
103 +dd
104 a
104 a
105 a
105 a
106 c
106 c
107 $ hg diff --noprefix --nodates
107 $ hg diff --noprefix --nodates
108 diff -r cf9f4ba66af2 a
108 diff -r cf9f4ba66af2 a
109 --- a
109 --- a
110 +++ a
110 +++ a
111 @@ -2,7 +2,7 @@
111 @@ -2,7 +2,7 @@
112 c
112 c
113 a
113 a
114 a
114 a
115 -b
115 -b
116 +dd
116 +dd
117 a
117 a
118 a
118 a
119 c
119 c
120
120
121 noprefix config disabled in plain mode, but option still enabled
121 noprefix config disabled in plain mode, but option still enabled
122
122
123 $ HGPLAIN=1 hg --config diff.noprefix=True diff --nodates
123 $ HGPLAIN=1 hg --config diff.noprefix=True diff --nodates
124 diff -r cf9f4ba66af2 a
124 diff -r cf9f4ba66af2 a
125 --- a/a
125 --- a/a
126 +++ b/a
126 +++ b/a
127 @@ -2,7 +2,7 @@
127 @@ -2,7 +2,7 @@
128 c
128 c
129 a
129 a
130 a
130 a
131 -b
131 -b
132 +dd
132 +dd
133 a
133 a
134 a
134 a
135 c
135 c
136 $ HGPLAIN=1 hg diff --noprefix --nodates
136 $ HGPLAIN=1 hg diff --noprefix --nodates
137 diff -r cf9f4ba66af2 a
137 diff -r cf9f4ba66af2 a
138 --- a
138 --- a
139 +++ a
139 +++ a
140 @@ -2,7 +2,7 @@
140 @@ -2,7 +2,7 @@
141 c
141 c
142 a
142 a
143 a
143 a
144 -b
144 -b
145 +dd
145 +dd
146 a
146 a
147 a
147 a
148 c
148 c
149
149
150 $ cd ..
150 $ cd ..
151
151
152
152
153 0 lines of context hunk header matches gnu diff hunk header
153 0 lines of context hunk header matches gnu diff hunk header
154
154
155 $ hg init diffzero
155 $ hg init diffzero
156 $ cd diffzero
156 $ cd diffzero
157 $ cat > f1 << EOF
157 $ cat > f1 << EOF
158 > c2
158 > c2
159 > c4
159 > c4
160 > c5
160 > c5
161 > EOF
161 > EOF
162 $ hg commit -Am0
162 $ hg commit -Am0
163 adding f1
163 adding f1
164
164
165 $ cat > f2 << EOF
165 $ cat > f2 << EOF
166 > c1
166 > c1
167 > c2
167 > c2
168 > c3
168 > c3
169 > c4
169 > c4
170 > EOF
170 > EOF
171 $ mv f2 f1
171 $ mv f2 f1
172 $ hg diff -U0 --nodates
172 $ hg diff -U0 --nodates
173 diff -r 55d8ff78db23 f1
173 diff -r 55d8ff78db23 f1
174 --- a/f1
174 --- a/f1
175 +++ b/f1
175 +++ b/f1
176 @@ -0,0 +1,1 @@
176 @@ -0,0 +1,1 @@
177 +c1
177 +c1
178 @@ -1,0 +3,1 @@
178 @@ -1,0 +3,1 @@
179 +c3
179 +c3
180 @@ -3,1 +4,0 @@
180 @@ -3,1 +4,0 @@
181 -c5
181 -c5
182
182
183 $ hg diff -U0 --nodates --git
183 $ hg diff -U0 --nodates --git
184 diff --git a/f1 b/f1
184 diff --git a/f1 b/f1
185 --- a/f1
185 --- a/f1
186 +++ b/f1
186 +++ b/f1
187 @@ -0,0 +1,1 @@
187 @@ -0,0 +1,1 @@
188 +c1
188 +c1
189 @@ -1,0 +3,1 @@
189 @@ -1,0 +3,1 @@
190 +c3
190 +c3
191 @@ -3,1 +4,0 @@
191 @@ -3,1 +4,0 @@
192 -c5
192 -c5
193
193
194 $ hg diff -U0 --nodates -p
194 $ hg diff -U0 --nodates -p
195 diff -r 55d8ff78db23 f1
195 diff -r 55d8ff78db23 f1
196 --- a/f1
196 --- a/f1
197 +++ b/f1
197 +++ b/f1
198 @@ -0,0 +1,1 @@
198 @@ -0,0 +1,1 @@
199 +c1
199 +c1
200 @@ -1,0 +3,1 @@ c2
200 @@ -1,0 +3,1 @@ c2
201 +c3
201 +c3
202 @@ -3,1 +4,0 @@ c4
202 @@ -3,1 +4,0 @@ c4
203 -c5
203 -c5
204
204
205 $ echo a > f1
205 $ echo a > f1
206 $ hg ci -m movef2
206 $ hg ci -m movef2
207
207
208 Test diff headers terminating with TAB when necessary (issue3357)
208 Test diff headers terminating with TAB when necessary (issue3357)
209 Regular diff --nodates, file creation
209 Regular diff --nodates, file creation
210
210
211 $ hg mv f1 'f 1'
211 $ hg mv f1 'f 1'
212 $ echo b > 'f 1'
212 $ echo b > 'f 1'
213 $ hg diff --nodates 'f 1'
213 $ hg diff --nodates 'f 1'
214 diff -r 7574207d0d15 f 1
214 diff -r 7574207d0d15 f 1
215 --- /dev/null
215 --- /dev/null
216 +++ b/f 1
216 +++ b/f 1
217 @@ -0,0 +1,1 @@
217 @@ -0,0 +1,1 @@
218 +b
218 +b
219
219
220 Git diff, adding space
220 Git diff, adding space
221
221
222 $ hg diff --git
222 $ hg diff --git
223 diff --git a/f1 b/f 1
223 diff --git a/f1 b/f 1
224 rename from f1
224 rename from f1
225 rename to f 1
225 rename to f 1
226 --- a/f1
226 --- a/f1
227 +++ b/f 1
227 +++ b/f 1
228 @@ -1,1 +1,1 @@
228 @@ -1,1 +1,1 @@
229 -a
229 -a
230 +b
230 +b
231
231
232 Git diff, adding extended headers
232 Git diff, adding extended headers
233
233
234 $ hg diff --git --config experimental.extendedheader.index=7 --config experimental.extendedheader.similarity=True
234 $ hg diff --git --config experimental.extendedheader.index=7 --config experimental.extendedheader.similarity=True
235 diff --git a/f1 b/f 1
235 diff --git a/f1 b/f 1
236 similarity index 0%
236 similarity index 0%
237 rename from f1
237 rename from f1
238 rename to f 1
238 rename to f 1
239 index 7898192..6178079 100644
239 index 7898192..6178079 100644
240 --- a/f1
240 --- a/f1
241 +++ b/f 1
241 +++ b/f 1
242 @@ -1,1 +1,1 @@
242 @@ -1,1 +1,1 @@
243 -a
243 -a
244 +b
244 +b
245
245
246 $ hg diff --git --config experimental.extendedheader.index=-1
247 invalid length for extendedheader.index: '-1'
248 diff --git a/f1 b/f 1
249 rename from f1
250 rename to f 1
251 --- a/f1
252 +++ b/f 1
253 @@ -1,1 +1,1 @@
254 -a
255 +b
256
257 $ hg diff --git --config experimental.extendedheader.index=whatever
258 invalid value for extendedheader.index: 'whatever'
259 diff --git a/f1 b/f 1
260 rename from f1
261 rename to f 1
262 --- a/f1
263 +++ b/f 1
264 @@ -1,1 +1,1 @@
265 -a
266 +b
267
246 Git diff with noprefix
268 Git diff with noprefix
247
269
248 $ hg --config diff.noprefix=True diff --git --nodates
270 $ hg --config diff.noprefix=True diff --git --nodates
249 diff --git f1 f 1
271 diff --git f1 f 1
250 rename from f1
272 rename from f1
251 rename to f 1
273 rename to f 1
252 --- f1
274 --- f1
253 +++ f 1
275 +++ f 1
254 @@ -1,1 +1,1 @@
276 @@ -1,1 +1,1 @@
255 -a
277 -a
256 +b
278 +b
257
279
258 noprefix config disabled in plain mode, but option still enabled
280 noprefix config disabled in plain mode, but option still enabled
259
281
260 $ HGPLAIN=1 hg --config diff.noprefix=True diff --git --nodates
282 $ HGPLAIN=1 hg --config diff.noprefix=True diff --git --nodates
261 diff --git a/f1 b/f 1
283 diff --git a/f1 b/f 1
262 rename from f1
284 rename from f1
263 rename to f 1
285 rename to f 1
264 --- a/f1
286 --- a/f1
265 +++ b/f 1
287 +++ b/f 1
266 @@ -1,1 +1,1 @@
288 @@ -1,1 +1,1 @@
267 -a
289 -a
268 +b
290 +b
269 $ HGPLAIN=1 hg diff --git --noprefix --nodates
291 $ HGPLAIN=1 hg diff --git --noprefix --nodates
270 diff --git f1 f 1
292 diff --git f1 f 1
271 rename from f1
293 rename from f1
272 rename to f 1
294 rename to f 1
273 --- f1
295 --- f1
274 +++ f 1
296 +++ f 1
275 @@ -1,1 +1,1 @@
297 @@ -1,1 +1,1 @@
276 -a
298 -a
277 +b
299 +b
278
300
279 Regular diff --nodates, file deletion
301 Regular diff --nodates, file deletion
280
302
281 $ hg ci -m addspace
303 $ hg ci -m addspace
282 $ hg mv 'f 1' f1
304 $ hg mv 'f 1' f1
283 $ echo a > f1
305 $ echo a > f1
284 $ hg diff --nodates 'f 1'
306 $ hg diff --nodates 'f 1'
285 diff -r ca50fe67c9c7 f 1
307 diff -r ca50fe67c9c7 f 1
286 --- a/f 1
308 --- a/f 1
287 +++ /dev/null
309 +++ /dev/null
288 @@ -1,1 +0,0 @@
310 @@ -1,1 +0,0 @@
289 -b
311 -b
290
312
291 Git diff, removing space
313 Git diff, removing space
292
314
293 $ hg diff --git
315 $ hg diff --git
294 diff --git a/f 1 b/f1
316 diff --git a/f 1 b/f1
295 rename from f 1
317 rename from f 1
296 rename to f1
318 rename to f1
297 --- a/f 1
319 --- a/f 1
298 +++ b/f1
320 +++ b/f1
299 @@ -1,1 +1,1 @@
321 @@ -1,1 +1,1 @@
300 -b
322 -b
301 +a
323 +a
302
324
303 showfunc diff
325 showfunc diff
304 $ cat > f1 << EOF
326 $ cat > f1 << EOF
305 > int main() {
327 > int main() {
306 > int a = 0;
328 > int a = 0;
307 > int b = 1;
329 > int b = 1;
308 > int c = 2;
330 > int c = 2;
309 > int d = 3;
331 > int d = 3;
310 > return a + b + c + d;
332 > return a + b + c + d;
311 > }
333 > }
312 > EOF
334 > EOF
313 $ hg commit -m addfunction
335 $ hg commit -m addfunction
314 $ cat > f1 << EOF
336 $ cat > f1 << EOF
315 > int main() {
337 > int main() {
316 > int a = 0;
338 > int a = 0;
317 > int b = 1;
339 > int b = 1;
318 > int c = 2;
340 > int c = 2;
319 > int e = 3;
341 > int e = 3;
320 > return a + b + c + e;
342 > return a + b + c + e;
321 > }
343 > }
322 > EOF
344 > EOF
323 $ hg diff --git
345 $ hg diff --git
324 diff --git a/f1 b/f1
346 diff --git a/f1 b/f1
325 --- a/f1
347 --- a/f1
326 +++ b/f1
348 +++ b/f1
327 @@ -2,6 +2,6 @@
349 @@ -2,6 +2,6 @@
328 int a = 0;
350 int a = 0;
329 int b = 1;
351 int b = 1;
330 int c = 2;
352 int c = 2;
331 - int d = 3;
353 - int d = 3;
332 - return a + b + c + d;
354 - return a + b + c + d;
333 + int e = 3;
355 + int e = 3;
334 + return a + b + c + e;
356 + return a + b + c + e;
335 }
357 }
336 $ hg diff --config diff.showfunc=True --git
358 $ hg diff --config diff.showfunc=True --git
337 diff --git a/f1 b/f1
359 diff --git a/f1 b/f1
338 --- a/f1
360 --- a/f1
339 +++ b/f1
361 +++ b/f1
340 @@ -2,6 +2,6 @@ int main() {
362 @@ -2,6 +2,6 @@ int main() {
341 int a = 0;
363 int a = 0;
342 int b = 1;
364 int b = 1;
343 int c = 2;
365 int c = 2;
344 - int d = 3;
366 - int d = 3;
345 - return a + b + c + d;
367 - return a + b + c + d;
346 + int e = 3;
368 + int e = 3;
347 + return a + b + c + e;
369 + return a + b + c + e;
348 }
370 }
349
371
350 If [diff] git is set to true, but the user says --no-git, we should
372 If [diff] git is set to true, but the user says --no-git, we should
351 *not* get git diffs
373 *not* get git diffs
352 $ hg diff --nodates --config diff.git=1 --no-git
374 $ hg diff --nodates --config diff.git=1 --no-git
353 diff -r f2c7c817fa55 f1
375 diff -r f2c7c817fa55 f1
354 --- a/f1
376 --- a/f1
355 +++ b/f1
377 +++ b/f1
356 @@ -2,6 +2,6 @@
378 @@ -2,6 +2,6 @@
357 int a = 0;
379 int a = 0;
358 int b = 1;
380 int b = 1;
359 int c = 2;
381 int c = 2;
360 - int d = 3;
382 - int d = 3;
361 - return a + b + c + d;
383 - return a + b + c + d;
362 + int e = 3;
384 + int e = 3;
363 + return a + b + c + e;
385 + return a + b + c + e;
364 }
386 }
365
387
366 $ cd ..
388 $ cd ..