##// END OF EJS Templates
patch: allow copy information to be passed in...
Henrik Stuart -
r29422:40d53d4b default
parent child Browse files
Show More
@@ -1,2606 +1,2610 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 scmutil,
37 scmutil,
38 util,
38 util,
39 )
39 )
# convenience alias for the cStringIO/StringIO wrapper provided by util
stringio = util.stringio

# matches a 'diff --git a/<old> b/<new>' line, capturing the two paths
gitre = re.compile('diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tab characters
tabsplitter = re.compile(r'(\t+|[^\t]+)')
44
44
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
47
47
48
48
49 # public functions
49 # public functions
50
50
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic: an RFC822-ish header line looks like 'Key: value'
        # with no space inside Key; continuation lines start with blanks
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # package accumulated lines back up as a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split at '# HG changeset patch' markers once outside a header block
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox at 'From ' separators, recursing into each message
        # body (cur[1:] drops the 'From ' separator line itself)
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email parser handle MIME structure; yield each part whose
        # content type may plausibly carry a patch
        def msgfp(m):
            fp = stringio()
            # NOTE: py2-style email API (email.Generator.Generator)
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block begins after non-header content
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no split points found: everything seen so far is one plain patch
        yield chunk(cur)

    class fiter(object):
        # adapt an object that only has readline() to the py2 iterator
        # protocol ('next', not '__next__')
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the first lines to decide which splitting strategy applies;
    # lines consumed here stay in 'cur' and are handed to the splitter
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
177
177
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# extract() matches each '# <header> <value>' line in an hg patch header
# against this table and stores <value> under <data key>
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
184
184
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # tmpfp accumulates the diff payload; its name is returned as
    # data['filename'] and must be unlinked by the caller
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        # NOTE: py2-style email API (email.Parser.Parser)
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH ...]' tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                # cfp collects commit-message text found before the diff
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible headers, see patchheadermap above
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            # first non-'#' line ends the hg patch header
                            hgpatchheader = False
                    elif line == '---':
                        # conventional mail signature / patchbomb separator
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff extends the message
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # nothing diff-like found: discard the temporary file
        os.unlink(tmpname)
    return data
304
304
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a git-style octal mode into the (islink, isexec) pair
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # duplicate carrying over every metadata field
        other = patchmeta(self.path)
        other.oldpath, other.mode = self.oldpath, self.mode
        other.op, other.binary = self.op, self.binary
        return other

    def _ispatchinga(self, afile):
        # does the diff's "a" side name this entry's source?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return 'a/' + (self.oldpath or self.path) == afile

    def _ispatchingb(self, bfile):
        # does the diff's "b" side name this entry's destination?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return 'b/' + self.path == bfile

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
350
350
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gitpatches = []
    gp = None
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file entry begins; flush any pending one
                if gp:
                    gitpatches.append(gp)
                gp = patchmeta(m.group(2))
        elif gp:
            if line.startswith('--- '):
                # start of the hunk body: metadata for this entry is done
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    # flush the final entry, if any
    if gp:
        gitpatches.append(gp)

    return gitpatches
394
394
class linereader(object):
    """File-like reader that allows lines to be pushed back onto the input."""
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # queue 'line' to be returned before any remaining data in 'fp'
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            # drain pushed-back lines first, oldest first
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        while True:
            line = self.readline()
            if not line:
                break
            yield line
418
418
class abstractbackend(object):
    """Interface for the destination of an applied patch.

    Concrete subclasses in this module write to the filesystem
    (fsbackend), the working directory (workingbackend) or an
    in-memory store (repobackend).
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        # return True if fname exists in the target
        raise NotImplementedError
450
450
class fsbackend(abstractbackend):
    """Backend applying patches to files under 'basedir' via an opener."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # a symlink's "data" is its target path
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: signal with (None, None)
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged, only flags need updating
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        rejname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, rejname))
        rejfp = self.opener(rejname, 'w')
        rejfp.writelines(lines)
        rejfp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
502
502
class workingbackend(fsbackend):
    """fsbackend that additionally records changes for dirstate updates."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch over an existing file the dirstate knows nothing about
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        touched = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for fname in self.removed:
                if fname not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    touched.discard(fname)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
546
546
class filestore(object):
    """Store of (data, mode, copied) entries keyed by filename.

    Entries are kept in memory until 'maxsize' bytes accumulate, after
    which new entries spill into a temporary directory on disk.
    """
    def __init__(self, maxsize=None):
        self.opener = None      # lazily-created opener for spilled files
        self.files = {}         # fname -> (diskname, mode, copied)
        self.created = 0        # counter used to name spilled files
        # default in-memory budget: 4 MiB; negative means "never spill"
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}          # fname -> (data, mode, copied), in memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # fits the in-memory budget
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.created += 1
            self.opener.write(fn, data)
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
583
583
class repobackend(abstractbackend):
    """Backend applying patches against a changectx into a filestore."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        else:
            flags = fctx.flags()
            return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # content unchanged: carry the current data forward
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # report every file this backend touched
        return self.changed | self.removed
625
625
# Hunk header parsers. Raw strings are used so that regex escapes like
# \d and \+ are passed through verbatim instead of being (mis)treated
# as string escape sequences (a deprecation warning on modern Python).
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range line: "--- start,len ----" / "*** start,len ****"
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Supported end-of-line normalization modes for patched files.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
630
630
class patchfile(object):
    """Applies a sequence of hunks to one file.

    State is loaded from *backend* (or, for copies/renames, from
    *store*) at construction time; hunks are then applied in memory via
    apply() and flushed back through the backend in close().

    ``eolmode`` is one of the values in ``eolmodes`` and controls how
    line endings are normalized between the file and the patch.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a patch-metadata object: gp.path / gp.oldpath / gp.mode /
        # gp.op carry the file names, mode and operation kind.
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Source data: regular files come from the backend, copies and
        # renames read the already-patched copy source from the store.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            # Copy destinations "exist" only if already present in backend.
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                # Detect the file's eol style from its first line; used
                # when writing back in 'auto' mode.
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Work internally on LF-normalized lines.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            # No source data: only acceptable when the patch creates it.
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        # Per-apply() bookkeeping:
        # hash: line content -> list of line indices (fuzzy search index)
        # offset: net lines added/removed so far by earlier hunks
        # skew: displacement found for the previous hunk, reused as a hint
        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []  # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, restoring eol style.

        Under 'auto' the file's original eol (detected in __init__) is
        reinstated; 'crlf'/'lf' force the respective ending.
        """
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Convert the internal LF-normalized lines back.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file ..." once per file (always on warning,
        otherwise only in verbose mode)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk *h* to the in-memory lines.

        Returns 0 on a clean apply, the fuzz level (> 0) when fuzz was
        needed, and -1 when the hunk was rejected (it is then queued in
        self.rej for write_rej()).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace the file contents wholesale.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz levels; at each level first trim context
        # only from the top of the hunk, then from both ends.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        # Reject the *original* (un-normalized) hunk so the .rej file
        # matches the input patch.
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush modified lines and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
845
845
class header(object):
    """One file's patch header: the lines before its hunks.

    Knows how to extract file names from the first header line and to
    classify the change (binary, new file, special).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when an 'index ' line marks this as a binary change."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a human-oriented summary of this header to *fp*."""
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                changed = sum(max(hk.added, hk.removed) for hk in self.hunks)
                fp.write(_('%d hunks, %d lines changed\n')
                         % (len(self.hunks), changed))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the change must be taken whole (binary/deleted)."""
        for h in self.header:
            if self.allhunks_re.match(h):
                return True
        return False

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            # plain "diff -r REV file" header
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % ' '.join(repr(f) for f in self.files())

    def isnewfile(self):
        for h in self.header:
            if self.newfile_re.match(h):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            # empty new file: header only, nothing else to pick from
            return True
        return any(self.special_re.match(h) for h in self.header)
917
917
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            # Context trimming is deliberately disabled (the `False and`
            # guard); full before/after context is always preserved.
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (self.hunk == v.hunk
                and self.proc == v.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def write(self, fp):
        """Emit this hunk in unified-diff form to *fp*."""
        context = len(self.before) + len(self.after)
        # A trailing "\ No newline" marker is annotation, not a line.
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            context -= 1
        fp.write('@@ -%d,%d +%d,%d @@%s\n'
                 % (self.fromline, context + self.removed,
                    self.toline, context + self.added,
                    self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
978
978
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks.

    *headers* is a list of header objects (with their hunks attached);
    *operation* selects the prompt wording ('record', 'discard' or
    'revert', defaulting to 'record'). Returns a pair of (list of
    selected headers/hunks, {}).
    """
    if operation is None:
        operation = 'record'
    # Prompt templates keyed by whether the patch has one or many hunks.
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # A previous all-file or whole-file decision short-circuits the
        # prompt entirely.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    # Re-parse the edited text into replacement patches.
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0  # net line delta from hunks skipped in this file
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # All-or-nothing change (binary/deleted): take every hunk.
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # Shift target lines to account for skipped hunks;
                    # copy so the caller's hunk is left untouched.
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # Chunk was edited: splice in the re-parsed hunks.
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # Keep only files where something beyond the bare header was chosen
    # (or the header itself is special, e.g. a deletion).
    return (sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One hunk of a unified or context diff.

    Parses the hunk body from a line reader and keeps three parallel
    views of it:

    - ``self.hunk``: the raw hunk lines, starting with the ``@@`` (or
      ``***``) description line
    - ``self.a``: the old-side lines, each prefixed with '-' or ' '
    - ``self.b``: the new-side lines, with the leading marker stripped

    ``starta``/``lena`` and ``startb``/``lenb`` are the old/new line
    ranges taken from the hunk header.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk description line ("@@ ..." or "*** ...")
        # num: 1-based hunk number, used only in error messages
        # lr: linereader to consume the hunk body from, or None to build
        #     an empty shell (see getnormalized)
        # context: truthy if the hunk is in context-diff format
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body ("@@ -s,l +s,l @@" header)."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in "@@ -s +s @@" means a one-line range
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk body and rebuild it as unified.

        Reads the old block ("*** s,e ****") and the new block
        ("--- s,e ----"), merging the new lines into ``self.hunk`` so
        that the final ``self.hunk`` is an equivalent unified hunk.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: "- " removals, "! " changes, "  " context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": drop the trailing newline we
            # just recorded on the last old-side line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk (which currently holds the old side) so
        # that new-side lines are spliced in at the right positions
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance past matching/removed lines until the spot where
            # this new-side line belongs, inserting it if it is an
            # addition not already present
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing "\ No newline..." marker, if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True if both sides were fully read (lengths match the header)."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # never trim more than 'fuzz' lines from either end
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz``
        context lines trimmed and the start offsets adjusted to 0-based."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1362
1362
class binhunk(object):
    'A binary patch file.'
    # Parses a git binary patch section: a 'literal <size>' or
    # 'delta <size>' header followed by base85-encoded, zlib-compressed
    # lines, terminated by an empty line.

    def __init__(self, lr, fname):
        # text: the decoded payload, or None until _read succeeds
        self.text = None
        # delta: True when the payload is a delta against the old file
        # rather than the full new content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload was successfully decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content (as a one-element list).

        ``lines`` is the old file content, only used when this hunk is a
        delta that must be applied on top of it.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read a line, record it verbatim in the raw hunk, return it
            # without the line terminator
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal'/'delta' header carrying the decoded size
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded byte count of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                # base85 decodes in 4-byte units; truncate padding to l
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1418
1418
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line.

    The fixed 4-character marker is dropped, trailing line endings are
    stripped, and anything after the first tab (or, failing that, the
    first space) -- typically a timestamp -- is discarded.
    """
    name = str[4:].rstrip('\r\n')
    # a tab separator wins over a space separator
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1428
1428
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    from . import crecord as crecordmod
    out = []
    for chunk in hunks:
        if isinstance(chunk, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            chunk = chunk._hunk
        if isinstance(chunk, recordhunk):
            # flip every +/- marker and swap the added/removed counters,
            # mutating the hunk in place (headers pass through untouched)
            flipped = []
            for line in chunk.hunk:
                if line.startswith("-"):
                    flipped.append("+" + line[1:])
                elif line.startswith("+"):
                    flipped.append("-" + line[1:])
                else:
                    flipped.append(line)
            chunk.hunk[:] = flipped
            chunk.added, chunk.removed = chunk.removed, chunk.added
        out.append(chunk)
    return out
1500
1500
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        # Accumulates scanpatch() events into header objects, each owning
        # its recordhunk objects.  'before' context, hunk lines and 'after'
        # context are buffered until the hunk can be finalized.
        def __init__(self):
            self.fromline = 0          # current old-file line number
            self.toline = 0            # current new-file line number
            self.proc = ''             # function name from the @@ line
            self.header = None         # header currently being filled
            self.context = []          # trailing context of previous hunk
            self.before = []           # leading context of current hunk
            self.hunk = []             # +/- lines of current hunk
            self.headers = []          # completed headers, in order

        def addrange(self, limits):
            # start a new hunk at the ranges given by the @@ line
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context after a hunk closes it: build the recordhunk and
            # advance the line counters past it
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # +/- lines begin; any buffered context becomes leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk, then open a new file header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all headers
            self.addcontext([])
            return self.headers

        # state -> event -> handler.  The values are plain (unbound)
        # functions, which is why they are invoked with the parser
        # instance passed explicitly below.
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            # event not legal in the current state: malformed patch
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1583
1583
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    plen = len(path)
    idx = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    remaining = strip
    while remaining > 0:
        idx = path.find('/', idx)
        if idx == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        idx += 1
        # consume '//' in the path
        while idx < plen - 1 and path[idx] == '/':
            idx += 1
        remaining -= 1
    # idx now sits just past the last stripped component
    return path[:idx].lstrip(), prefix + path[idx:].rstrip()
1621
1621
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    # Build a patchmeta describing which repository file the first hunk
    # of a plain (non-git) patch applies to, and whether the patch
    # creates or deletes it.  afile_orig/bfile_orig are the raw '---' and
    # '+++ ' paths; strip/prefix are applied via pathtransform; backend
    # is queried for file existence.
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a hunk starting at 0 with length 0 against /dev/null marks a file
    # creation (old side) or deletion (new side)
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda  # skip the second existence check when identical
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer whichever side actually exists; on ties, the backup
        # heuristic decides
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # neither side exists (or 'missing'): fall back on whichever
        # side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1676
1676
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    - ('other', line) for any line not matching the above
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate p holds, returning the matched lines

        The first non-matching line is pushed back onto the reader.
        """
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            # a new file: gather the header up to (and including) the
            # ---/+++ pair when present
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # context lines ('\\' covers "\ No newline at end of file")
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.

    Returns the list of patchmeta objects parsed from the stream; the
    underlying file position is restored so the caller can re-read it.
    """
    pos = 0
    try:
        # seekable stream: remember where we are so we can rewind
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable (e.g. a pipe): buffer the whole input in memory
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know the diff flavor: True for context
    # diffs, False for unified diffs
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit metadata-only entries (renames, mode changes...) for
            # files appearing before the one being patched
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # leftover git metadata entries with no hunks of their own
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the decoded delta payload; data is the source the delta
    is applied against. Returns the reconstructed target content.
    Raises PatchError on a zero opcode, which git reserves as invalid.
    """
    def deltahead(binchunk):
        # number of bytes occupied by the base-128 varint (size header)
        # at the start of binchunk; the high bit marks continuation
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip the source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]   # skip the target size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: bits 0-3 select optional offset bytes,
            # bits 4-6 select optional size bytes (little-endian)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # git encodes a copy of 0x10000 bytes as size 0
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert command: cmd is the literal byte count that follows
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper fixing the per-file patcher to the default patchfile
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Core of applydiff: drive iterhunks() events through patcher/backend.

    Returns 0 on clean application, 1 on fuzz, -1 when any hunk was
    rejected. 'patcher' builds the per-file patching object (patchfile
    or a subclass).
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip path components and prepend the display prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: create/delete/copy without hunks
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # snapshot copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # initialize before the loop: some patch implementations may emit
    # "with fuzz"/"FAILED" lines before any "patching file" line, which
    # would otherwise read pf/printed_file before assignment
    pf = None
    printed_file = False
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj through the given backend.

    patchobj may be a file path or an open file object. files, when
    given, is updated with the set of touched paths. Returns True when
    the patch applied with fuzz, False on a clean apply; raises
    PatchError when hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume an already-open file object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply patchobj against changectx ctx, collecting results in store."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        # user configured an external patch program; it handles EOLs itself
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at patchpath.

    For renames, both the old and the new path are included.
    """
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed

class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git diff format."""
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# backwards-compatible alias for the historical name
diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
    with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line opts win over config; HGPLAIN can force a value
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

2186 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2186 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2187 losedatafn=None, prefix='', relroot=''):
2187 losedatafn=None, prefix='', relroot='', copy=None):
2188 '''yields diff of changes to files between two nodes, or node and
2188 '''yields diff of changes to files between two nodes, or node and
2189 working directory.
2189 working directory.
2190
2190
2191 if node1 is None, use first dirstate parent instead.
2191 if node1 is None, use first dirstate parent instead.
2192 if node2 is None, compare node1 with working directory.
2192 if node2 is None, compare node1 with working directory.
2193
2193
2194 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2194 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2195 every time some change cannot be represented with the current
2195 every time some change cannot be represented with the current
2196 patch format. Return False to upgrade to git patch format, True to
2196 patch format. Return False to upgrade to git patch format, True to
2197 accept the loss or raise an exception to abort the diff. It is
2197 accept the loss or raise an exception to abort the diff. It is
2198 called with the name of current file being diffed as 'fn'. If set
2198 called with the name of current file being diffed as 'fn'. If set
2199 to None, patches will always be upgraded to git format when
2199 to None, patches will always be upgraded to git format when
2200 necessary.
2200 necessary.
2201
2201
2202 prefix is a filename prefix that is prepended to all filenames on
2202 prefix is a filename prefix that is prepended to all filenames on
2203 display (used for subrepos).
2203 display (used for subrepos).
2204
2204
2205 relroot, if not empty, must be normalized with a trailing /. Any match
2205 relroot, if not empty, must be normalized with a trailing /. Any match
2206 patterns that fall outside it will be ignored.'''
2206 patterns that fall outside it will be ignored.
2207
2208 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2209 information.'''
2207
2210
2208 if opts is None:
2211 if opts is None:
2209 opts = mdiff.defaultopts
2212 opts = mdiff.defaultopts
2210
2213
2211 if not node1 and not node2:
2214 if not node1 and not node2:
2212 node1 = repo.dirstate.p1()
2215 node1 = repo.dirstate.p1()
2213
2216
2214 def lrugetfilectx():
2217 def lrugetfilectx():
2215 cache = {}
2218 cache = {}
2216 order = collections.deque()
2219 order = collections.deque()
2217 def getfilectx(f, ctx):
2220 def getfilectx(f, ctx):
2218 fctx = ctx.filectx(f, filelog=cache.get(f))
2221 fctx = ctx.filectx(f, filelog=cache.get(f))
2219 if f not in cache:
2222 if f not in cache:
2220 if len(cache) > 20:
2223 if len(cache) > 20:
2221 del cache[order.popleft()]
2224 del cache[order.popleft()]
2222 cache[f] = fctx.filelog()
2225 cache[f] = fctx.filelog()
2223 else:
2226 else:
2224 order.remove(f)
2227 order.remove(f)
2225 order.append(f)
2228 order.append(f)
2226 return fctx
2229 return fctx
2227 return getfilectx
2230 return getfilectx
2228 getfilectx = lrugetfilectx()
2231 getfilectx = lrugetfilectx()
2229
2232
2230 ctx1 = repo[node1]
2233 ctx1 = repo[node1]
2231 ctx2 = repo[node2]
2234 ctx2 = repo[node2]
2232
2235
2233 relfiltered = False
2236 relfiltered = False
2234 if relroot != '' and match.always():
2237 if relroot != '' and match.always():
2235 # as a special case, create a new matcher with just the relroot
2238 # as a special case, create a new matcher with just the relroot
2236 pats = [relroot]
2239 pats = [relroot]
2237 match = scmutil.match(ctx2, pats, default='path')
2240 match = scmutil.match(ctx2, pats, default='path')
2238 relfiltered = True
2241 relfiltered = True
2239
2242
2240 if not changes:
2243 if not changes:
2241 changes = repo.status(ctx1, ctx2, match=match)
2244 changes = repo.status(ctx1, ctx2, match=match)
2242 modified, added, removed = changes[:3]
2245 modified, added, removed = changes[:3]
2243
2246
2244 if not modified and not added and not removed:
2247 if not modified and not added and not removed:
2245 return []
2248 return []
2246
2249
2247 if repo.ui.debugflag:
2250 if repo.ui.debugflag:
2248 hexfunc = hex
2251 hexfunc = hex
2249 else:
2252 else:
2250 hexfunc = short
2253 hexfunc = short
2251 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2254 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2252
2255
2253 copy = {}
2256 if copy is None:
2254 if opts.git or opts.upgrade:
2257 copy = {}
2255 copy = copies.pathcopies(ctx1, ctx2, match=match)
2258 if opts.git or opts.upgrade:
2259 copy = copies.pathcopies(ctx1, ctx2, match=match)
2256
2260
2257 if relroot is not None:
2261 if relroot is not None:
2258 if not relfiltered:
2262 if not relfiltered:
2259 # XXX this would ideally be done in the matcher, but that is
2263 # XXX this would ideally be done in the matcher, but that is
2260 # generally meant to 'or' patterns, not 'and' them. In this case we
2264 # generally meant to 'or' patterns, not 'and' them. In this case we
2261 # need to 'and' all the patterns from the matcher with relroot.
2265 # need to 'and' all the patterns from the matcher with relroot.
2262 def filterrel(l):
2266 def filterrel(l):
2263 return [f for f in l if f.startswith(relroot)]
2267 return [f for f in l if f.startswith(relroot)]
2264 modified = filterrel(modified)
2268 modified = filterrel(modified)
2265 added = filterrel(added)
2269 added = filterrel(added)
2266 removed = filterrel(removed)
2270 removed = filterrel(removed)
2267 relfiltered = True
2271 relfiltered = True
2268 # filter out copies where either side isn't inside the relative root
2272 # filter out copies where either side isn't inside the relative root
2269 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2273 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2270 if dst.startswith(relroot)
2274 if dst.startswith(relroot)
2271 and src.startswith(relroot)))
2275 and src.startswith(relroot)))
2272
2276
2273 modifiedset = set(modified)
2277 modifiedset = set(modified)
2274 addedset = set(added)
2278 addedset = set(added)
2275 removedset = set(removed)
2279 removedset = set(removed)
2276 for f in modified:
2280 for f in modified:
2277 if f not in ctx1:
2281 if f not in ctx1:
2278 # Fix up added, since merged-in additions appear as
2282 # Fix up added, since merged-in additions appear as
2279 # modifications during merges
2283 # modifications during merges
2280 modifiedset.remove(f)
2284 modifiedset.remove(f)
2281 addedset.add(f)
2285 addedset.add(f)
2282 for f in removed:
2286 for f in removed:
2283 if f not in ctx1:
2287 if f not in ctx1:
2284 # Merged-in additions that are then removed are reported as removed.
2288 # Merged-in additions that are then removed are reported as removed.
2285 # They are not in ctx1, so We don't want to show them in the diff.
2289 # They are not in ctx1, so We don't want to show them in the diff.
2286 removedset.remove(f)
2290 removedset.remove(f)
2287 modified = sorted(modifiedset)
2291 modified = sorted(modifiedset)
2288 added = sorted(addedset)
2292 added = sorted(addedset)
2289 removed = sorted(removedset)
2293 removed = sorted(removedset)
2290 for dst, src in copy.items():
2294 for dst, src in copy.items():
2291 if src not in ctx1:
2295 if src not in ctx1:
2292 # Files merged in during a merge and then copied/renamed are
2296 # Files merged in during a merge and then copied/renamed are
2293 # reported as copies. We want to show them in the diff as additions.
2297 # reported as copies. We want to show them in the diff as additions.
2294 del copy[dst]
2298 del copy[dst]
2295
2299
2296 def difffn(opts, losedata):
2300 def difffn(opts, losedata):
2297 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2301 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2298 copy, getfilectx, opts, losedata, prefix, relroot)
2302 copy, getfilectx, opts, losedata, prefix, relroot)
2299 if opts.upgrade and not opts.git:
2303 if opts.upgrade and not opts.git:
2300 try:
2304 try:
2301 def losedata(fn):
2305 def losedata(fn):
2302 if not losedatafn or not losedatafn(fn=fn):
2306 if not losedatafn or not losedatafn(fn=fn):
2303 raise GitDiffRequired
2307 raise GitDiffRequired
2304 # Buffer the whole output until we are sure it can be generated
2308 # Buffer the whole output until we are sure it can be generated
2305 return list(difffn(opts.copy(git=False), losedata))
2309 return list(difffn(opts.copy(git=False), losedata))
2306 except GitDiffRequired:
2310 except GitDiffRequired:
2307 return difffn(opts.copy(git=True), None)
2311 return difffn(opts.copy(git=True), None)
2308 else:
2312 else:
2309 return difffn(opts, None)
2313 return difffn(opts, None)
2310
2314
2311 def difflabel(func, *args, **kw):
2315 def difflabel(func, *args, **kw):
2312 '''yields 2-tuples of (output, label) based on the output of func()'''
2316 '''yields 2-tuples of (output, label) based on the output of func()'''
2313 headprefixes = [('diff', 'diff.diffline'),
2317 headprefixes = [('diff', 'diff.diffline'),
2314 ('copy', 'diff.extended'),
2318 ('copy', 'diff.extended'),
2315 ('rename', 'diff.extended'),
2319 ('rename', 'diff.extended'),
2316 ('old', 'diff.extended'),
2320 ('old', 'diff.extended'),
2317 ('new', 'diff.extended'),
2321 ('new', 'diff.extended'),
2318 ('deleted', 'diff.extended'),
2322 ('deleted', 'diff.extended'),
2319 ('---', 'diff.file_a'),
2323 ('---', 'diff.file_a'),
2320 ('+++', 'diff.file_b')]
2324 ('+++', 'diff.file_b')]
2321 textprefixes = [('@', 'diff.hunk'),
2325 textprefixes = [('@', 'diff.hunk'),
2322 ('-', 'diff.deleted'),
2326 ('-', 'diff.deleted'),
2323 ('+', 'diff.inserted')]
2327 ('+', 'diff.inserted')]
2324 head = False
2328 head = False
2325 for chunk in func(*args, **kw):
2329 for chunk in func(*args, **kw):
2326 lines = chunk.split('\n')
2330 lines = chunk.split('\n')
2327 for i, line in enumerate(lines):
2331 for i, line in enumerate(lines):
2328 if i != 0:
2332 if i != 0:
2329 yield ('\n', '')
2333 yield ('\n', '')
2330 if head:
2334 if head:
2331 if line.startswith('@'):
2335 if line.startswith('@'):
2332 head = False
2336 head = False
2333 else:
2337 else:
2334 if line and line[0] not in ' +-@\\':
2338 if line and line[0] not in ' +-@\\':
2335 head = True
2339 head = True
2336 stripline = line
2340 stripline = line
2337 diffline = False
2341 diffline = False
2338 if not head and line and line[0] in '+-':
2342 if not head and line and line[0] in '+-':
2339 # highlight tabs and trailing whitespace, but only in
2343 # highlight tabs and trailing whitespace, but only in
2340 # changed lines
2344 # changed lines
2341 stripline = line.rstrip()
2345 stripline = line.rstrip()
2342 diffline = True
2346 diffline = True
2343
2347
2344 prefixes = textprefixes
2348 prefixes = textprefixes
2345 if head:
2349 if head:
2346 prefixes = headprefixes
2350 prefixes = headprefixes
2347 for prefix, label in prefixes:
2351 for prefix, label in prefixes:
2348 if stripline.startswith(prefix):
2352 if stripline.startswith(prefix):
2349 if diffline:
2353 if diffline:
2350 for token in tabsplitter.findall(stripline):
2354 for token in tabsplitter.findall(stripline):
2351 if '\t' == token[0]:
2355 if '\t' == token[0]:
2352 yield (token, 'diff.tab')
2356 yield (token, 'diff.tab')
2353 else:
2357 else:
2354 yield (token, label)
2358 yield (token, label)
2355 else:
2359 else:
2356 yield (stripline, label)
2360 yield (stripline, label)
2357 break
2361 break
2358 else:
2362 else:
2359 yield (line, '')
2363 yield (line, '')
2360 if line != stripline:
2364 if line != stripline:
2361 yield (line[len(stripline):], 'diff.trailingwhitespace')
2365 yield (line[len(stripline):], 'diff.trailingwhitespace')
2362
2366
2363 def diffui(*args, **kw):
2367 def diffui(*args, **kw):
2364 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2368 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2365 return difflabel(diff, *args, **kw)
2369 return difflabel(diff, *args, **kw)
2366
2370
2367 def _filepairs(modified, added, removed, copy, opts):
2371 def _filepairs(modified, added, removed, copy, opts):
2368 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2372 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2369 before and f2 is the the name after. For added files, f1 will be None,
2373 before and f2 is the the name after. For added files, f1 will be None,
2370 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2374 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2371 or 'rename' (the latter two only if opts.git is set).'''
2375 or 'rename' (the latter two only if opts.git is set).'''
2372 gone = set()
2376 gone = set()
2373
2377
2374 copyto = dict([(v, k) for k, v in copy.items()])
2378 copyto = dict([(v, k) for k, v in copy.items()])
2375
2379
2376 addedset, removedset = set(added), set(removed)
2380 addedset, removedset = set(added), set(removed)
2377
2381
2378 for f in sorted(modified + added + removed):
2382 for f in sorted(modified + added + removed):
2379 copyop = None
2383 copyop = None
2380 f1, f2 = f, f
2384 f1, f2 = f, f
2381 if f in addedset:
2385 if f in addedset:
2382 f1 = None
2386 f1 = None
2383 if f in copy:
2387 if f in copy:
2384 if opts.git:
2388 if opts.git:
2385 f1 = copy[f]
2389 f1 = copy[f]
2386 if f1 in removedset and f1 not in gone:
2390 if f1 in removedset and f1 not in gone:
2387 copyop = 'rename'
2391 copyop = 'rename'
2388 gone.add(f1)
2392 gone.add(f1)
2389 else:
2393 else:
2390 copyop = 'copy'
2394 copyop = 'copy'
2391 elif f in removedset:
2395 elif f in removedset:
2392 f2 = None
2396 f2 = None
2393 if opts.git:
2397 if opts.git:
2394 # have we already reported a copy above?
2398 # have we already reported a copy above?
2395 if (f in copyto and copyto[f] in addedset
2399 if (f in copyto and copyto[f] in addedset
2396 and copy[copyto[f]] == f):
2400 and copy[copyto[f]] == f):
2397 continue
2401 continue
2398 yield f1, f2, copyop
2402 yield f1, f2, copyop
2399
2403
2400 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2404 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2401 copy, getfilectx, opts, losedatafn, prefix, relroot):
2405 copy, getfilectx, opts, losedatafn, prefix, relroot):
2402 '''given input data, generate a diff and yield it in blocks
2406 '''given input data, generate a diff and yield it in blocks
2403
2407
2404 If generating a diff would lose data like flags or binary data and
2408 If generating a diff would lose data like flags or binary data and
2405 losedatafn is not None, it will be called.
2409 losedatafn is not None, it will be called.
2406
2410
2407 relroot is removed and prefix is added to every path in the diff output.
2411 relroot is removed and prefix is added to every path in the diff output.
2408
2412
2409 If relroot is not empty, this function expects every path in modified,
2413 If relroot is not empty, this function expects every path in modified,
2410 added, removed and copy to start with it.'''
2414 added, removed and copy to start with it.'''
2411
2415
2412 def gitindex(text):
2416 def gitindex(text):
2413 if not text:
2417 if not text:
2414 text = ""
2418 text = ""
2415 l = len(text)
2419 l = len(text)
2416 s = hashlib.sha1('blob %d\0' % l)
2420 s = hashlib.sha1('blob %d\0' % l)
2417 s.update(text)
2421 s.update(text)
2418 return s.hexdigest()
2422 return s.hexdigest()
2419
2423
2420 if opts.noprefix:
2424 if opts.noprefix:
2421 aprefix = bprefix = ''
2425 aprefix = bprefix = ''
2422 else:
2426 else:
2423 aprefix = 'a/'
2427 aprefix = 'a/'
2424 bprefix = 'b/'
2428 bprefix = 'b/'
2425
2429
2426 def diffline(f, revs):
2430 def diffline(f, revs):
2427 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2431 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2428 return 'diff %s %s' % (revinfo, f)
2432 return 'diff %s %s' % (revinfo, f)
2429
2433
2430 date1 = util.datestr(ctx1.date())
2434 date1 = util.datestr(ctx1.date())
2431 date2 = util.datestr(ctx2.date())
2435 date2 = util.datestr(ctx2.date())
2432
2436
2433 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2437 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2434
2438
2435 if relroot != '' and (repo.ui.configbool('devel', 'all')
2439 if relroot != '' and (repo.ui.configbool('devel', 'all')
2436 or repo.ui.configbool('devel', 'check-relroot')):
2440 or repo.ui.configbool('devel', 'check-relroot')):
2437 for f in modified + added + removed + copy.keys() + copy.values():
2441 for f in modified + added + removed + copy.keys() + copy.values():
2438 if f is not None and not f.startswith(relroot):
2442 if f is not None and not f.startswith(relroot):
2439 raise AssertionError(
2443 raise AssertionError(
2440 "file %s doesn't start with relroot %s" % (f, relroot))
2444 "file %s doesn't start with relroot %s" % (f, relroot))
2441
2445
2442 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2446 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2443 content1 = None
2447 content1 = None
2444 content2 = None
2448 content2 = None
2445 flag1 = None
2449 flag1 = None
2446 flag2 = None
2450 flag2 = None
2447 if f1:
2451 if f1:
2448 content1 = getfilectx(f1, ctx1).data()
2452 content1 = getfilectx(f1, ctx1).data()
2449 if opts.git or losedatafn:
2453 if opts.git or losedatafn:
2450 flag1 = ctx1.flags(f1)
2454 flag1 = ctx1.flags(f1)
2451 if f2:
2455 if f2:
2452 content2 = getfilectx(f2, ctx2).data()
2456 content2 = getfilectx(f2, ctx2).data()
2453 if opts.git or losedatafn:
2457 if opts.git or losedatafn:
2454 flag2 = ctx2.flags(f2)
2458 flag2 = ctx2.flags(f2)
2455 binary = False
2459 binary = False
2456 if opts.git or losedatafn:
2460 if opts.git or losedatafn:
2457 binary = util.binary(content1) or util.binary(content2)
2461 binary = util.binary(content1) or util.binary(content2)
2458
2462
2459 if losedatafn and not opts.git:
2463 if losedatafn and not opts.git:
2460 if (binary or
2464 if (binary or
2461 # copy/rename
2465 # copy/rename
2462 f2 in copy or
2466 f2 in copy or
2463 # empty file creation
2467 # empty file creation
2464 (not f1 and not content2) or
2468 (not f1 and not content2) or
2465 # empty file deletion
2469 # empty file deletion
2466 (not content1 and not f2) or
2470 (not content1 and not f2) or
2467 # create with flags
2471 # create with flags
2468 (not f1 and flag2) or
2472 (not f1 and flag2) or
2469 # change flags
2473 # change flags
2470 (f1 and f2 and flag1 != flag2)):
2474 (f1 and f2 and flag1 != flag2)):
2471 losedatafn(f2 or f1)
2475 losedatafn(f2 or f1)
2472
2476
2473 path1 = f1 or f2
2477 path1 = f1 or f2
2474 path2 = f2 or f1
2478 path2 = f2 or f1
2475 path1 = posixpath.join(prefix, path1[len(relroot):])
2479 path1 = posixpath.join(prefix, path1[len(relroot):])
2476 path2 = posixpath.join(prefix, path2[len(relroot):])
2480 path2 = posixpath.join(prefix, path2[len(relroot):])
2477 header = []
2481 header = []
2478 if opts.git:
2482 if opts.git:
2479 header.append('diff --git %s%s %s%s' %
2483 header.append('diff --git %s%s %s%s' %
2480 (aprefix, path1, bprefix, path2))
2484 (aprefix, path1, bprefix, path2))
2481 if not f1: # added
2485 if not f1: # added
2482 header.append('new file mode %s' % gitmode[flag2])
2486 header.append('new file mode %s' % gitmode[flag2])
2483 elif not f2: # removed
2487 elif not f2: # removed
2484 header.append('deleted file mode %s' % gitmode[flag1])
2488 header.append('deleted file mode %s' % gitmode[flag1])
2485 else: # modified/copied/renamed
2489 else: # modified/copied/renamed
2486 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2490 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2487 if mode1 != mode2:
2491 if mode1 != mode2:
2488 header.append('old mode %s' % mode1)
2492 header.append('old mode %s' % mode1)
2489 header.append('new mode %s' % mode2)
2493 header.append('new mode %s' % mode2)
2490 if copyop is not None:
2494 if copyop is not None:
2491 header.append('%s from %s' % (copyop, path1))
2495 header.append('%s from %s' % (copyop, path1))
2492 header.append('%s to %s' % (copyop, path2))
2496 header.append('%s to %s' % (copyop, path2))
2493 elif revs and not repo.ui.quiet:
2497 elif revs and not repo.ui.quiet:
2494 header.append(diffline(path1, revs))
2498 header.append(diffline(path1, revs))
2495
2499
2496 if binary and opts.git and not opts.nobinary:
2500 if binary and opts.git and not opts.nobinary:
2497 text = mdiff.b85diff(content1, content2)
2501 text = mdiff.b85diff(content1, content2)
2498 if text:
2502 if text:
2499 header.append('index %s..%s' %
2503 header.append('index %s..%s' %
2500 (gitindex(content1), gitindex(content2)))
2504 (gitindex(content1), gitindex(content2)))
2501 else:
2505 else:
2502 text = mdiff.unidiff(content1, date1,
2506 text = mdiff.unidiff(content1, date1,
2503 content2, date2,
2507 content2, date2,
2504 path1, path2, opts=opts)
2508 path1, path2, opts=opts)
2505 if header and (text or len(header) > 1):
2509 if header and (text or len(header) > 1):
2506 yield '\n'.join(header) + '\n'
2510 yield '\n'.join(header) + '\n'
2507 if text:
2511 if text:
2508 yield text
2512 yield text
2509
2513
2510 def diffstatsum(stats):
2514 def diffstatsum(stats):
2511 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2515 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2512 for f, a, r, b in stats:
2516 for f, a, r, b in stats:
2513 maxfile = max(maxfile, encoding.colwidth(f))
2517 maxfile = max(maxfile, encoding.colwidth(f))
2514 maxtotal = max(maxtotal, a + r)
2518 maxtotal = max(maxtotal, a + r)
2515 addtotal += a
2519 addtotal += a
2516 removetotal += r
2520 removetotal += r
2517 binary = binary or b
2521 binary = binary or b
2518
2522
2519 return maxfile, maxtotal, addtotal, removetotal, binary
2523 return maxfile, maxtotal, addtotal, removetotal, binary
2520
2524
2521 def diffstatdata(lines):
2525 def diffstatdata(lines):
2522 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2526 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2523
2527
2524 results = []
2528 results = []
2525 filename, adds, removes, isbinary = None, 0, 0, False
2529 filename, adds, removes, isbinary = None, 0, 0, False
2526
2530
2527 def addresult():
2531 def addresult():
2528 if filename:
2532 if filename:
2529 results.append((filename, adds, removes, isbinary))
2533 results.append((filename, adds, removes, isbinary))
2530
2534
2531 for line in lines:
2535 for line in lines:
2532 if line.startswith('diff'):
2536 if line.startswith('diff'):
2533 addresult()
2537 addresult()
2534 # set numbers to 0 anyway when starting new file
2538 # set numbers to 0 anyway when starting new file
2535 adds, removes, isbinary = 0, 0, False
2539 adds, removes, isbinary = 0, 0, False
2536 if line.startswith('diff --git a/'):
2540 if line.startswith('diff --git a/'):
2537 filename = gitre.search(line).group(2)
2541 filename = gitre.search(line).group(2)
2538 elif line.startswith('diff -r'):
2542 elif line.startswith('diff -r'):
2539 # format: "diff -r ... -r ... filename"
2543 # format: "diff -r ... -r ... filename"
2540 filename = diffre.search(line).group(1)
2544 filename = diffre.search(line).group(1)
2541 elif line.startswith('+') and not line.startswith('+++ '):
2545 elif line.startswith('+') and not line.startswith('+++ '):
2542 adds += 1
2546 adds += 1
2543 elif line.startswith('-') and not line.startswith('--- '):
2547 elif line.startswith('-') and not line.startswith('--- '):
2544 removes += 1
2548 removes += 1
2545 elif (line.startswith('GIT binary patch') or
2549 elif (line.startswith('GIT binary patch') or
2546 line.startswith('Binary file')):
2550 line.startswith('Binary file')):
2547 isbinary = True
2551 isbinary = True
2548 addresult()
2552 addresult()
2549 return results
2553 return results
2550
2554
2551 def diffstat(lines, width=80, git=False):
2555 def diffstat(lines, width=80, git=False):
2552 output = []
2556 output = []
2553 stats = diffstatdata(lines)
2557 stats = diffstatdata(lines)
2554 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2558 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2555
2559
2556 countwidth = len(str(maxtotal))
2560 countwidth = len(str(maxtotal))
2557 if hasbinary and countwidth < 3:
2561 if hasbinary and countwidth < 3:
2558 countwidth = 3
2562 countwidth = 3
2559 graphwidth = width - countwidth - maxname - 6
2563 graphwidth = width - countwidth - maxname - 6
2560 if graphwidth < 10:
2564 if graphwidth < 10:
2561 graphwidth = 10
2565 graphwidth = 10
2562
2566
2563 def scale(i):
2567 def scale(i):
2564 if maxtotal <= graphwidth:
2568 if maxtotal <= graphwidth:
2565 return i
2569 return i
2566 # If diffstat runs out of room it doesn't print anything,
2570 # If diffstat runs out of room it doesn't print anything,
2567 # which isn't very useful, so always print at least one + or -
2571 # which isn't very useful, so always print at least one + or -
2568 # if there were at least some changes.
2572 # if there were at least some changes.
2569 return max(i * graphwidth // maxtotal, int(bool(i)))
2573 return max(i * graphwidth // maxtotal, int(bool(i)))
2570
2574
2571 for filename, adds, removes, isbinary in stats:
2575 for filename, adds, removes, isbinary in stats:
2572 if isbinary:
2576 if isbinary:
2573 count = 'Bin'
2577 count = 'Bin'
2574 else:
2578 else:
2575 count = adds + removes
2579 count = adds + removes
2576 pluses = '+' * scale(adds)
2580 pluses = '+' * scale(adds)
2577 minuses = '-' * scale(removes)
2581 minuses = '-' * scale(removes)
2578 output.append(' %s%s | %*s %s%s\n' %
2582 output.append(' %s%s | %*s %s%s\n' %
2579 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2583 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2580 countwidth, count, pluses, minuses))
2584 countwidth, count, pluses, minuses))
2581
2585
2582 if stats:
2586 if stats:
2583 output.append(_(' %d files changed, %d insertions(+), '
2587 output.append(_(' %d files changed, %d insertions(+), '
2584 '%d deletions(-)\n')
2588 '%d deletions(-)\n')
2585 % (len(stats), totaladds, totalremoves))
2589 % (len(stats), totaladds, totalremoves))
2586
2590
2587 return ''.join(output)
2591 return ''.join(output)
2588
2592
2589 def diffstatui(*args, **kw):
2593 def diffstatui(*args, **kw):
2590 '''like diffstat(), but yields 2-tuples of (output, label) for
2594 '''like diffstat(), but yields 2-tuples of (output, label) for
2591 ui.write()
2595 ui.write()
2592 '''
2596 '''
2593
2597
2594 for line in diffstat(*args, **kw).splitlines():
2598 for line in diffstat(*args, **kw).splitlines():
2595 if line and line[-1] in '+-':
2599 if line and line[-1] in '+-':
2596 name, graph = line.rsplit(' ', 1)
2600 name, graph = line.rsplit(' ', 1)
2597 yield (name + ' ', '')
2601 yield (name + ' ', '')
2598 m = re.search(r'\++', graph)
2602 m = re.search(r'\++', graph)
2599 if m:
2603 if m:
2600 yield (m.group(0), 'diffstat.inserted')
2604 yield (m.group(0), 'diffstat.inserted')
2601 m = re.search(r'-+', graph)
2605 m = re.search(r'-+', graph)
2602 if m:
2606 if m:
2603 yield (m.group(0), 'diffstat.deleted')
2607 yield (m.group(0), 'diffstat.deleted')
2604 else:
2608 else:
2605 yield (line, '')
2609 yield (line, '')
2606 yield ('\n', '')
2610 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now