##// END OF EJS Templates
patch: use `iter(callable, sentinel)` instead of while True...
Augie Fackler -
r29726:160c829d default
parent child Browse files
Show More
@@ -1,2610 +1,2597 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 scmutil,
37 scmutil,
38 util,
38 util,
39 )
39 )
40 stringio = util.stringio
40 stringio = util.stringio
41
41
42 gitre = re.compile('diff --git a/(.*) b/(.*)')
42 gitre = re.compile('diff --git a/(.*) b/(.*)')
43 tabsplitter = re.compile(r'(\t+|[^\t]+)')
43 tabsplitter = re.compile(r'(\t+|[^\t]+)')
44
44
45 class PatchError(Exception):
45 class PatchError(Exception):
46 pass
46 pass
47
47
48
48
49 # public functions
49 # public functions
50
50
51 def split(stream):
51 def split(stream):
52 '''return an iterator of individual patches from a stream'''
52 '''return an iterator of individual patches from a stream'''
53 def isheader(line, inheader):
53 def isheader(line, inheader):
54 if inheader and line[0] in (' ', '\t'):
54 if inheader and line[0] in (' ', '\t'):
55 # continuation
55 # continuation
56 return True
56 return True
57 if line[0] in (' ', '-', '+'):
57 if line[0] in (' ', '-', '+'):
58 # diff line - don't check for header pattern in there
58 # diff line - don't check for header pattern in there
59 return False
59 return False
60 l = line.split(': ', 1)
60 l = line.split(': ', 1)
61 return len(l) == 2 and ' ' not in l[0]
61 return len(l) == 2 and ' ' not in l[0]
62
62
63 def chunk(lines):
63 def chunk(lines):
64 return stringio(''.join(lines))
64 return stringio(''.join(lines))
65
65
66 def hgsplit(stream, cur):
66 def hgsplit(stream, cur):
67 inheader = True
67 inheader = True
68
68
69 for line in stream:
69 for line in stream:
70 if not line.strip():
70 if not line.strip():
71 inheader = False
71 inheader = False
72 if not inheader and line.startswith('# HG changeset patch'):
72 if not inheader and line.startswith('# HG changeset patch'):
73 yield chunk(cur)
73 yield chunk(cur)
74 cur = []
74 cur = []
75 inheader = True
75 inheader = True
76
76
77 cur.append(line)
77 cur.append(line)
78
78
79 if cur:
79 if cur:
80 yield chunk(cur)
80 yield chunk(cur)
81
81
82 def mboxsplit(stream, cur):
82 def mboxsplit(stream, cur):
83 for line in stream:
83 for line in stream:
84 if line.startswith('From '):
84 if line.startswith('From '):
85 for c in split(chunk(cur[1:])):
85 for c in split(chunk(cur[1:])):
86 yield c
86 yield c
87 cur = []
87 cur = []
88
88
89 cur.append(line)
89 cur.append(line)
90
90
91 if cur:
91 if cur:
92 for c in split(chunk(cur[1:])):
92 for c in split(chunk(cur[1:])):
93 yield c
93 yield c
94
94
95 def mimesplit(stream, cur):
95 def mimesplit(stream, cur):
96 def msgfp(m):
96 def msgfp(m):
97 fp = stringio()
97 fp = stringio()
98 g = email.Generator.Generator(fp, mangle_from_=False)
98 g = email.Generator.Generator(fp, mangle_from_=False)
99 g.flatten(m)
99 g.flatten(m)
100 fp.seek(0)
100 fp.seek(0)
101 return fp
101 return fp
102
102
103 for line in stream:
103 for line in stream:
104 cur.append(line)
104 cur.append(line)
105 c = chunk(cur)
105 c = chunk(cur)
106
106
107 m = email.Parser.Parser().parse(c)
107 m = email.Parser.Parser().parse(c)
108 if not m.is_multipart():
108 if not m.is_multipart():
109 yield msgfp(m)
109 yield msgfp(m)
110 else:
110 else:
111 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
111 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
112 for part in m.walk():
112 for part in m.walk():
113 ct = part.get_content_type()
113 ct = part.get_content_type()
114 if ct not in ok_types:
114 if ct not in ok_types:
115 continue
115 continue
116 yield msgfp(part)
116 yield msgfp(part)
117
117
118 def headersplit(stream, cur):
118 def headersplit(stream, cur):
119 inheader = False
119 inheader = False
120
120
121 for line in stream:
121 for line in stream:
122 if not inheader and isheader(line, inheader):
122 if not inheader and isheader(line, inheader):
123 yield chunk(cur)
123 yield chunk(cur)
124 cur = []
124 cur = []
125 inheader = True
125 inheader = True
126 if inheader and not isheader(line, inheader):
126 if inheader and not isheader(line, inheader):
127 inheader = False
127 inheader = False
128
128
129 cur.append(line)
129 cur.append(line)
130
130
131 if cur:
131 if cur:
132 yield chunk(cur)
132 yield chunk(cur)
133
133
134 def remainder(cur):
134 def remainder(cur):
135 yield chunk(cur)
135 yield chunk(cur)
136
136
137 class fiter(object):
137 class fiter(object):
138 def __init__(self, fp):
138 def __init__(self, fp):
139 self.fp = fp
139 self.fp = fp
140
140
141 def __iter__(self):
141 def __iter__(self):
142 return self
142 return self
143
143
144 def next(self):
144 def next(self):
145 l = self.fp.readline()
145 l = self.fp.readline()
146 if not l:
146 if not l:
147 raise StopIteration
147 raise StopIteration
148 return l
148 return l
149
149
150 inheader = False
150 inheader = False
151 cur = []
151 cur = []
152
152
153 mimeheaders = ['content-type']
153 mimeheaders = ['content-type']
154
154
155 if not util.safehasattr(stream, 'next'):
155 if not util.safehasattr(stream, 'next'):
156 # http responses, for example, have readline but not next
156 # http responses, for example, have readline but not next
157 stream = fiter(stream)
157 stream = fiter(stream)
158
158
159 for line in stream:
159 for line in stream:
160 cur.append(line)
160 cur.append(line)
161 if line.startswith('# HG changeset patch'):
161 if line.startswith('# HG changeset patch'):
162 return hgsplit(stream, cur)
162 return hgsplit(stream, cur)
163 elif line.startswith('From '):
163 elif line.startswith('From '):
164 return mboxsplit(stream, cur)
164 return mboxsplit(stream, cur)
165 elif isheader(line, inheader):
165 elif isheader(line, inheader):
166 inheader = True
166 inheader = True
167 if line.split(':', 1)[0].lower() in mimeheaders:
167 if line.split(':', 1)[0].lower() in mimeheaders:
168 # let email parser handle this
168 # let email parser handle this
169 return mimesplit(stream, cur)
169 return mimesplit(stream, cur)
170 elif line.startswith('--- ') and inheader:
170 elif line.startswith('--- ') and inheader:
171 # No evil headers seen by diff start, split by hand
171 # No evil headers seen by diff start, split by hand
172 return headersplit(stream, cur)
172 return headersplit(stream, cur)
173 # Not enough info, keep reading
173 # Not enough info, keep reading
174
174
175 # if we are here, we have a very plain patch
175 # if we are here, we have a very plain patch
176 return remainder(cur)
176 return remainder(cur)
177
177
178 ## Some facility for extensible patch parsing:
178 ## Some facility for extensible patch parsing:
179 # list of pairs ("header to match", "data key")
179 # list of pairs ("header to match", "data key")
180 patchheadermap = [('Date', 'date'),
180 patchheadermap = [('Date', 'date'),
181 ('Branch', 'branch'),
181 ('Branch', 'branch'),
182 ('Node ID', 'nodeid'),
182 ('Node ID', 'nodeid'),
183 ]
183 ]
184
184
185 def extract(ui, fileobj):
185 def extract(ui, fileobj):
186 '''extract patch from data read from fileobj.
186 '''extract patch from data read from fileobj.
187
187
188 patch can be a normal patch or contained in an email message.
188 patch can be a normal patch or contained in an email message.
189
189
190 return a dictionary. Standard keys are:
190 return a dictionary. Standard keys are:
191 - filename,
191 - filename,
192 - message,
192 - message,
193 - user,
193 - user,
194 - date,
194 - date,
195 - branch,
195 - branch,
196 - node,
196 - node,
197 - p1,
197 - p1,
198 - p2.
198 - p2.
199 Any item can be missing from the dictionary. If filename is missing,
199 Any item can be missing from the dictionary. If filename is missing,
200 fileobj did not contain a patch. Caller must unlink filename when done.'''
200 fileobj did not contain a patch. Caller must unlink filename when done.'''
201
201
202 # attempt to detect the start of a patch
202 # attempt to detect the start of a patch
203 # (this heuristic is borrowed from quilt)
203 # (this heuristic is borrowed from quilt)
204 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
204 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
205 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
205 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
206 r'---[ \t].*?^\+\+\+[ \t]|'
206 r'---[ \t].*?^\+\+\+[ \t]|'
207 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
207 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
208
208
209 data = {}
209 data = {}
210 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
210 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
211 tmpfp = os.fdopen(fd, 'w')
211 tmpfp = os.fdopen(fd, 'w')
212 try:
212 try:
213 msg = email.Parser.Parser().parse(fileobj)
213 msg = email.Parser.Parser().parse(fileobj)
214
214
215 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
215 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
216 data['user'] = msg['From'] and mail.headdecode(msg['From'])
216 data['user'] = msg['From'] and mail.headdecode(msg['From'])
217 if not subject and not data['user']:
217 if not subject and not data['user']:
218 # Not an email, restore parsed headers if any
218 # Not an email, restore parsed headers if any
219 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
219 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
220
220
221 # should try to parse msg['Date']
221 # should try to parse msg['Date']
222 parents = []
222 parents = []
223
223
224 if subject:
224 if subject:
225 if subject.startswith('[PATCH'):
225 if subject.startswith('[PATCH'):
226 pend = subject.find(']')
226 pend = subject.find(']')
227 if pend >= 0:
227 if pend >= 0:
228 subject = subject[pend + 1:].lstrip()
228 subject = subject[pend + 1:].lstrip()
229 subject = re.sub(r'\n[ \t]+', ' ', subject)
229 subject = re.sub(r'\n[ \t]+', ' ', subject)
230 ui.debug('Subject: %s\n' % subject)
230 ui.debug('Subject: %s\n' % subject)
231 if data['user']:
231 if data['user']:
232 ui.debug('From: %s\n' % data['user'])
232 ui.debug('From: %s\n' % data['user'])
233 diffs_seen = 0
233 diffs_seen = 0
234 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
234 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
235 message = ''
235 message = ''
236 for part in msg.walk():
236 for part in msg.walk():
237 content_type = part.get_content_type()
237 content_type = part.get_content_type()
238 ui.debug('Content-Type: %s\n' % content_type)
238 ui.debug('Content-Type: %s\n' % content_type)
239 if content_type not in ok_types:
239 if content_type not in ok_types:
240 continue
240 continue
241 payload = part.get_payload(decode=True)
241 payload = part.get_payload(decode=True)
242 m = diffre.search(payload)
242 m = diffre.search(payload)
243 if m:
243 if m:
244 hgpatch = False
244 hgpatch = False
245 hgpatchheader = False
245 hgpatchheader = False
246 ignoretext = False
246 ignoretext = False
247
247
248 ui.debug('found patch at byte %d\n' % m.start(0))
248 ui.debug('found patch at byte %d\n' % m.start(0))
249 diffs_seen += 1
249 diffs_seen += 1
250 cfp = stringio()
250 cfp = stringio()
251 for line in payload[:m.start(0)].splitlines():
251 for line in payload[:m.start(0)].splitlines():
252 if line.startswith('# HG changeset patch') and not hgpatch:
252 if line.startswith('# HG changeset patch') and not hgpatch:
253 ui.debug('patch generated by hg export\n')
253 ui.debug('patch generated by hg export\n')
254 hgpatch = True
254 hgpatch = True
255 hgpatchheader = True
255 hgpatchheader = True
256 # drop earlier commit message content
256 # drop earlier commit message content
257 cfp.seek(0)
257 cfp.seek(0)
258 cfp.truncate()
258 cfp.truncate()
259 subject = None
259 subject = None
260 elif hgpatchheader:
260 elif hgpatchheader:
261 if line.startswith('# User '):
261 if line.startswith('# User '):
262 data['user'] = line[7:]
262 data['user'] = line[7:]
263 ui.debug('From: %s\n' % data['user'])
263 ui.debug('From: %s\n' % data['user'])
264 elif line.startswith("# Parent "):
264 elif line.startswith("# Parent "):
265 parents.append(line[9:].lstrip())
265 parents.append(line[9:].lstrip())
266 elif line.startswith("# "):
266 elif line.startswith("# "):
267 for header, key in patchheadermap:
267 for header, key in patchheadermap:
268 prefix = '# %s ' % header
268 prefix = '# %s ' % header
269 if line.startswith(prefix):
269 if line.startswith(prefix):
270 data[key] = line[len(prefix):]
270 data[key] = line[len(prefix):]
271 else:
271 else:
272 hgpatchheader = False
272 hgpatchheader = False
273 elif line == '---':
273 elif line == '---':
274 ignoretext = True
274 ignoretext = True
275 if not hgpatchheader and not ignoretext:
275 if not hgpatchheader and not ignoretext:
276 cfp.write(line)
276 cfp.write(line)
277 cfp.write('\n')
277 cfp.write('\n')
278 message = cfp.getvalue()
278 message = cfp.getvalue()
279 if tmpfp:
279 if tmpfp:
280 tmpfp.write(payload)
280 tmpfp.write(payload)
281 if not payload.endswith('\n'):
281 if not payload.endswith('\n'):
282 tmpfp.write('\n')
282 tmpfp.write('\n')
283 elif not diffs_seen and message and content_type == 'text/plain':
283 elif not diffs_seen and message and content_type == 'text/plain':
284 message += '\n' + payload
284 message += '\n' + payload
285 except: # re-raises
285 except: # re-raises
286 tmpfp.close()
286 tmpfp.close()
287 os.unlink(tmpname)
287 os.unlink(tmpname)
288 raise
288 raise
289
289
290 if subject and not message.startswith(subject):
290 if subject and not message.startswith(subject):
291 message = '%s\n%s' % (subject, message)
291 message = '%s\n%s' % (subject, message)
292 data['message'] = message
292 data['message'] = message
293 tmpfp.close()
293 tmpfp.close()
294 if parents:
294 if parents:
295 data['p1'] = parents.pop(0)
295 data['p1'] = parents.pop(0)
296 if parents:
296 if parents:
297 data['p2'] = parents.pop(0)
297 data['p2'] = parents.pop(0)
298
298
299 if diffs_seen:
299 if diffs_seen:
300 data['filename'] = tmpname
300 data['filename'] = tmpname
301 else:
301 else:
302 os.unlink(tmpname)
302 os.unlink(tmpname)
303 return data
303 return data
304
304
305 class patchmeta(object):
305 class patchmeta(object):
306 """Patched file metadata
306 """Patched file metadata
307
307
308 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
308 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
309 or COPY. 'path' is patched file path. 'oldpath' is set to the
309 or COPY. 'path' is patched file path. 'oldpath' is set to the
310 origin file when 'op' is either COPY or RENAME, None otherwise. If
310 origin file when 'op' is either COPY or RENAME, None otherwise. If
311 file mode is changed, 'mode' is a tuple (islink, isexec) where
311 file mode is changed, 'mode' is a tuple (islink, isexec) where
312 'islink' is True if the file is a symlink and 'isexec' is True if
312 'islink' is True if the file is a symlink and 'isexec' is True if
313 the file is executable. Otherwise, 'mode' is None.
313 the file is executable. Otherwise, 'mode' is None.
314 """
314 """
315 def __init__(self, path):
315 def __init__(self, path):
316 self.path = path
316 self.path = path
317 self.oldpath = None
317 self.oldpath = None
318 self.mode = None
318 self.mode = None
319 self.op = 'MODIFY'
319 self.op = 'MODIFY'
320 self.binary = False
320 self.binary = False
321
321
322 def setmode(self, mode):
322 def setmode(self, mode):
323 islink = mode & 0o20000
323 islink = mode & 0o20000
324 isexec = mode & 0o100
324 isexec = mode & 0o100
325 self.mode = (islink, isexec)
325 self.mode = (islink, isexec)
326
326
327 def copy(self):
327 def copy(self):
328 other = patchmeta(self.path)
328 other = patchmeta(self.path)
329 other.oldpath = self.oldpath
329 other.oldpath = self.oldpath
330 other.mode = self.mode
330 other.mode = self.mode
331 other.op = self.op
331 other.op = self.op
332 other.binary = self.binary
332 other.binary = self.binary
333 return other
333 return other
334
334
335 def _ispatchinga(self, afile):
335 def _ispatchinga(self, afile):
336 if afile == '/dev/null':
336 if afile == '/dev/null':
337 return self.op == 'ADD'
337 return self.op == 'ADD'
338 return afile == 'a/' + (self.oldpath or self.path)
338 return afile == 'a/' + (self.oldpath or self.path)
339
339
340 def _ispatchingb(self, bfile):
340 def _ispatchingb(self, bfile):
341 if bfile == '/dev/null':
341 if bfile == '/dev/null':
342 return self.op == 'DELETE'
342 return self.op == 'DELETE'
343 return bfile == 'b/' + self.path
343 return bfile == 'b/' + self.path
344
344
345 def ispatching(self, afile, bfile):
345 def ispatching(self, afile, bfile):
346 return self._ispatchinga(afile) and self._ispatchingb(bfile)
346 return self._ispatchinga(afile) and self._ispatchingb(bfile)
347
347
348 def __repr__(self):
348 def __repr__(self):
349 return "<patchmeta %s %r>" % (self.op, self.path)
349 return "<patchmeta %s %r>" % (self.op, self.path)
350
350
351 def readgitpatch(lr):
351 def readgitpatch(lr):
352 """extract git-style metadata about patches from <patchname>"""
352 """extract git-style metadata about patches from <patchname>"""
353
353
354 # Filter patch for git information
354 # Filter patch for git information
355 gp = None
355 gp = None
356 gitpatches = []
356 gitpatches = []
357 for line in lr:
357 for line in lr:
358 line = line.rstrip(' \r\n')
358 line = line.rstrip(' \r\n')
359 if line.startswith('diff --git a/'):
359 if line.startswith('diff --git a/'):
360 m = gitre.match(line)
360 m = gitre.match(line)
361 if m:
361 if m:
362 if gp:
362 if gp:
363 gitpatches.append(gp)
363 gitpatches.append(gp)
364 dst = m.group(2)
364 dst = m.group(2)
365 gp = patchmeta(dst)
365 gp = patchmeta(dst)
366 elif gp:
366 elif gp:
367 if line.startswith('--- '):
367 if line.startswith('--- '):
368 gitpatches.append(gp)
368 gitpatches.append(gp)
369 gp = None
369 gp = None
370 continue
370 continue
371 if line.startswith('rename from '):
371 if line.startswith('rename from '):
372 gp.op = 'RENAME'
372 gp.op = 'RENAME'
373 gp.oldpath = line[12:]
373 gp.oldpath = line[12:]
374 elif line.startswith('rename to '):
374 elif line.startswith('rename to '):
375 gp.path = line[10:]
375 gp.path = line[10:]
376 elif line.startswith('copy from '):
376 elif line.startswith('copy from '):
377 gp.op = 'COPY'
377 gp.op = 'COPY'
378 gp.oldpath = line[10:]
378 gp.oldpath = line[10:]
379 elif line.startswith('copy to '):
379 elif line.startswith('copy to '):
380 gp.path = line[8:]
380 gp.path = line[8:]
381 elif line.startswith('deleted file'):
381 elif line.startswith('deleted file'):
382 gp.op = 'DELETE'
382 gp.op = 'DELETE'
383 elif line.startswith('new file mode '):
383 elif line.startswith('new file mode '):
384 gp.op = 'ADD'
384 gp.op = 'ADD'
385 gp.setmode(int(line[-6:], 8))
385 gp.setmode(int(line[-6:], 8))
386 elif line.startswith('new mode '):
386 elif line.startswith('new mode '):
387 gp.setmode(int(line[-6:], 8))
387 gp.setmode(int(line[-6:], 8))
388 elif line.startswith('GIT binary patch'):
388 elif line.startswith('GIT binary patch'):
389 gp.binary = True
389 gp.binary = True
390 if gp:
390 if gp:
391 gitpatches.append(gp)
391 gitpatches.append(gp)
392
392
393 return gitpatches
393 return gitpatches
394
394
395 class linereader(object):
395 class linereader(object):
396 # simple class to allow pushing lines back into the input stream
396 # simple class to allow pushing lines back into the input stream
397 def __init__(self, fp):
397 def __init__(self, fp):
398 self.fp = fp
398 self.fp = fp
399 self.buf = []
399 self.buf = []
400
400
401 def push(self, line):
401 def push(self, line):
402 if line is not None:
402 if line is not None:
403 self.buf.append(line)
403 self.buf.append(line)
404
404
405 def readline(self):
405 def readline(self):
406 if self.buf:
406 if self.buf:
407 l = self.buf[0]
407 l = self.buf[0]
408 del self.buf[0]
408 del self.buf[0]
409 return l
409 return l
410 return self.fp.readline()
410 return self.fp.readline()
411
411
412 def __iter__(self):
412 def __iter__(self):
413 while True:
413 return iter(self.readline, '')
414 l = self.readline()
415 if not l:
416 break
417 yield l
418
414
419 class abstractbackend(object):
415 class abstractbackend(object):
420 def __init__(self, ui):
416 def __init__(self, ui):
421 self.ui = ui
417 self.ui = ui
422
418
423 def getfile(self, fname):
419 def getfile(self, fname):
424 """Return target file data and flags as a (data, (islink,
420 """Return target file data and flags as a (data, (islink,
425 isexec)) tuple. Data is None if file is missing/deleted.
421 isexec)) tuple. Data is None if file is missing/deleted.
426 """
422 """
427 raise NotImplementedError
423 raise NotImplementedError
428
424
429 def setfile(self, fname, data, mode, copysource):
425 def setfile(self, fname, data, mode, copysource):
430 """Write data to target file fname and set its mode. mode is a
426 """Write data to target file fname and set its mode. mode is a
431 (islink, isexec) tuple. If data is None, the file content should
427 (islink, isexec) tuple. If data is None, the file content should
432 be left unchanged. If the file is modified after being copied,
428 be left unchanged. If the file is modified after being copied,
433 copysource is set to the original file name.
429 copysource is set to the original file name.
434 """
430 """
435 raise NotImplementedError
431 raise NotImplementedError
436
432
437 def unlink(self, fname):
433 def unlink(self, fname):
438 """Unlink target file."""
434 """Unlink target file."""
439 raise NotImplementedError
435 raise NotImplementedError
440
436
441 def writerej(self, fname, failed, total, lines):
437 def writerej(self, fname, failed, total, lines):
442 """Write rejected lines for fname. total is the number of hunks
438 """Write rejected lines for fname. total is the number of hunks
443 which failed to apply and total the total number of hunks for this
439 which failed to apply and total the total number of hunks for this
444 files.
440 files.
445 """
441 """
446 pass
442 pass
447
443
448 def exists(self, fname):
444 def exists(self, fname):
449 raise NotImplementedError
445 raise NotImplementedError
450
446
451 class fsbackend(abstractbackend):
447 class fsbackend(abstractbackend):
452 def __init__(self, ui, basedir):
448 def __init__(self, ui, basedir):
453 super(fsbackend, self).__init__(ui)
449 super(fsbackend, self).__init__(ui)
454 self.opener = scmutil.opener(basedir)
450 self.opener = scmutil.opener(basedir)
455
451
456 def _join(self, f):
452 def _join(self, f):
457 return os.path.join(self.opener.base, f)
453 return os.path.join(self.opener.base, f)
458
454
459 def getfile(self, fname):
455 def getfile(self, fname):
460 if self.opener.islink(fname):
456 if self.opener.islink(fname):
461 return (self.opener.readlink(fname), (True, False))
457 return (self.opener.readlink(fname), (True, False))
462
458
463 isexec = False
459 isexec = False
464 try:
460 try:
465 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
461 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
466 except OSError as e:
462 except OSError as e:
467 if e.errno != errno.ENOENT:
463 if e.errno != errno.ENOENT:
468 raise
464 raise
469 try:
465 try:
470 return (self.opener.read(fname), (False, isexec))
466 return (self.opener.read(fname), (False, isexec))
471 except IOError as e:
467 except IOError as e:
472 if e.errno != errno.ENOENT:
468 if e.errno != errno.ENOENT:
473 raise
469 raise
474 return None, None
470 return None, None
475
471
476 def setfile(self, fname, data, mode, copysource):
472 def setfile(self, fname, data, mode, copysource):
477 islink, isexec = mode
473 islink, isexec = mode
478 if data is None:
474 if data is None:
479 self.opener.setflags(fname, islink, isexec)
475 self.opener.setflags(fname, islink, isexec)
480 return
476 return
481 if islink:
477 if islink:
482 self.opener.symlink(data, fname)
478 self.opener.symlink(data, fname)
483 else:
479 else:
484 self.opener.write(fname, data)
480 self.opener.write(fname, data)
485 if isexec:
481 if isexec:
486 self.opener.setflags(fname, False, True)
482 self.opener.setflags(fname, False, True)
487
483
488 def unlink(self, fname):
484 def unlink(self, fname):
489 self.opener.unlinkpath(fname, ignoremissing=True)
485 self.opener.unlinkpath(fname, ignoremissing=True)
490
486
491 def writerej(self, fname, failed, total, lines):
487 def writerej(self, fname, failed, total, lines):
492 fname = fname + ".rej"
488 fname = fname + ".rej"
493 self.ui.warn(
489 self.ui.warn(
494 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
490 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
495 (failed, total, fname))
491 (failed, total, fname))
496 fp = self.opener(fname, 'w')
492 fp = self.opener(fname, 'w')
497 fp.writelines(lines)
493 fp.writelines(lines)
498 fp.close()
494 fp.close()
499
495
500 def exists(self, fname):
496 def exists(self, fname):
501 return self.opener.lexists(fname)
497 return self.opener.lexists(fname)
502
498
503 class workingbackend(fsbackend):
499 class workingbackend(fsbackend):
504 def __init__(self, ui, repo, similarity):
500 def __init__(self, ui, repo, similarity):
505 super(workingbackend, self).__init__(ui, repo.root)
501 super(workingbackend, self).__init__(ui, repo.root)
506 self.repo = repo
502 self.repo = repo
507 self.similarity = similarity
503 self.similarity = similarity
508 self.removed = set()
504 self.removed = set()
509 self.changed = set()
505 self.changed = set()
510 self.copied = []
506 self.copied = []
511
507
512 def _checkknown(self, fname):
508 def _checkknown(self, fname):
513 if self.repo.dirstate[fname] == '?' and self.exists(fname):
509 if self.repo.dirstate[fname] == '?' and self.exists(fname):
514 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
510 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
515
511
516 def setfile(self, fname, data, mode, copysource):
512 def setfile(self, fname, data, mode, copysource):
517 self._checkknown(fname)
513 self._checkknown(fname)
518 super(workingbackend, self).setfile(fname, data, mode, copysource)
514 super(workingbackend, self).setfile(fname, data, mode, copysource)
519 if copysource is not None:
515 if copysource is not None:
520 self.copied.append((copysource, fname))
516 self.copied.append((copysource, fname))
521 self.changed.add(fname)
517 self.changed.add(fname)
522
518
523 def unlink(self, fname):
519 def unlink(self, fname):
524 self._checkknown(fname)
520 self._checkknown(fname)
525 super(workingbackend, self).unlink(fname)
521 super(workingbackend, self).unlink(fname)
526 self.removed.add(fname)
522 self.removed.add(fname)
527 self.changed.add(fname)
523 self.changed.add(fname)
528
524
529 def close(self):
525 def close(self):
530 wctx = self.repo[None]
526 wctx = self.repo[None]
531 changed = set(self.changed)
527 changed = set(self.changed)
532 for src, dst in self.copied:
528 for src, dst in self.copied:
533 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
529 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
534 if self.removed:
530 if self.removed:
535 wctx.forget(sorted(self.removed))
531 wctx.forget(sorted(self.removed))
536 for f in self.removed:
532 for f in self.removed:
537 if f not in self.repo.dirstate:
533 if f not in self.repo.dirstate:
538 # File was deleted and no longer belongs to the
534 # File was deleted and no longer belongs to the
539 # dirstate, it was probably marked added then
535 # dirstate, it was probably marked added then
540 # deleted, and should not be considered by
536 # deleted, and should not be considered by
541 # marktouched().
537 # marktouched().
542 changed.discard(f)
538 changed.discard(f)
543 if changed:
539 if changed:
544 scmutil.marktouched(self.repo, changed, self.similarity)
540 scmutil.marktouched(self.repo, changed, self.similarity)
545 return sorted(self.changed)
541 return sorted(self.changed)
546
542
547 class filestore(object):
543 class filestore(object):
548 def __init__(self, maxsize=None):
544 def __init__(self, maxsize=None):
549 self.opener = None
545 self.opener = None
550 self.files = {}
546 self.files = {}
551 self.created = 0
547 self.created = 0
552 self.maxsize = maxsize
548 self.maxsize = maxsize
553 if self.maxsize is None:
549 if self.maxsize is None:
554 self.maxsize = 4*(2**20)
550 self.maxsize = 4*(2**20)
555 self.size = 0
551 self.size = 0
556 self.data = {}
552 self.data = {}
557
553
558 def setfile(self, fname, data, mode, copied=None):
554 def setfile(self, fname, data, mode, copied=None):
559 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
555 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
560 self.data[fname] = (data, mode, copied)
556 self.data[fname] = (data, mode, copied)
561 self.size += len(data)
557 self.size += len(data)
562 else:
558 else:
563 if self.opener is None:
559 if self.opener is None:
564 root = tempfile.mkdtemp(prefix='hg-patch-')
560 root = tempfile.mkdtemp(prefix='hg-patch-')
565 self.opener = scmutil.opener(root)
561 self.opener = scmutil.opener(root)
566 # Avoid filename issues with these simple names
562 # Avoid filename issues with these simple names
567 fn = str(self.created)
563 fn = str(self.created)
568 self.opener.write(fn, data)
564 self.opener.write(fn, data)
569 self.created += 1
565 self.created += 1
570 self.files[fname] = (fn, mode, copied)
566 self.files[fname] = (fn, mode, copied)
571
567
572 def getfile(self, fname):
568 def getfile(self, fname):
573 if fname in self.data:
569 if fname in self.data:
574 return self.data[fname]
570 return self.data[fname]
575 if not self.opener or fname not in self.files:
571 if not self.opener or fname not in self.files:
576 return None, None, None
572 return None, None, None
577 fn, mode, copied = self.files[fname]
573 fn, mode, copied = self.files[fname]
578 return self.opener.read(fn), mode, copied
574 return self.opener.read(fn), mode, copied
579
575
580 def close(self):
576 def close(self):
581 if self.opener:
577 if self.opener:
582 shutil.rmtree(self.opener.base)
578 shutil.rmtree(self.opener.base)
583
579
class repobackend(abstractbackend):
    """Patch backend that records changes against a changectx.

    File contents are buffered in the given store rather than written
    to the working directory; close() reports which files were touched.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()    # files modified or added
        self.removed = set()    # files deleted
        self.copied = {}        # dest -> copy source

    def _checkknown(self, fname):
        # Patching an untracked file in a changectx makes no sense.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None)."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        """Record new contents for fname in the store.

        data=None means "keep the current contents" (mode-only change).
        """
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by this backend."""
        return self.changed | self.removed
625
621
# Hunk header patterns.
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff hunk range lines: "--- start,len ----" / "*** start,len ****"
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# accepted values for the eolmode argument used throughout this module
eolmodes = ['strict', 'crlf', 'lf', 'auto']
630
626
class patchfile(object):
    """Apply the hunks of one parsed patch ("git patch" metadata gp) to a
    single target file, reading and writing through a patching backend.

    Rejected hunks are accumulated in self.rej and written out by
    write_rej(); close() flushes changes and returns the reject count.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, restoring the requested EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file X" once per file (always on warn)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Try to apply hunk h; return 0 on exact match, the fuzz level
        used on a fuzzy match, or -1 when the hunk is rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
845
841
class header(object):
    """patch header

    Wraps the raw header lines of one file's diff and answers questions
    about them (affected files, binary-ness, git-specific operations).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header   # list of raw header lines
        self.hunks = []        # hunks attached to this header

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to fp."""
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the change must be taken all-or-nothing (binary index
        # lines or file deletions).
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file(s) this header touches: [path] or [from, to]."""
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
            any(self.special_re.match(h) for h in self.header)
917
913
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            # NOTE: trimming is deliberately disabled ("if False"); context
            # lines are currently kept in full.
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        """Write this hunk (with @@ range line and context) to fp."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
978
974
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks.

    Prompts the user once per header and once per hunk; returns a pair
    (list of selected headers/hunks, {}).  operation selects the prompt
    wording ('record', 'discard' or 'revert'); default is 'record'.
    """
    if operation is None:
        operation = 'record'
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    # Earlier skipped/edited hunks shifted the target
                    # line numbers; patch a copy, not the shared chunk.
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
1146 class hunk(object):
1142 class hunk(object):
1147 def __init__(self, desc, num, lr, context):
1143 def __init__(self, desc, num, lr, context):
1148 self.number = num
1144 self.number = num
1149 self.desc = desc
1145 self.desc = desc
1150 self.hunk = [desc]
1146 self.hunk = [desc]
1151 self.a = []
1147 self.a = []
1152 self.b = []
1148 self.b = []
1153 self.starta = self.lena = None
1149 self.starta = self.lena = None
1154 self.startb = self.lenb = None
1150 self.startb = self.lenb = None
1155 if lr is not None:
1151 if lr is not None:
1156 if context:
1152 if context:
1157 self.read_context_hunk(lr)
1153 self.read_context_hunk(lr)
1158 else:
1154 else:
1159 self.read_unified_hunk(lr)
1155 self.read_unified_hunk(lr)
1160
1156
1161 def getnormalized(self):
1157 def getnormalized(self):
1162 """Return a copy with line endings normalized to LF."""
1158 """Return a copy with line endings normalized to LF."""
1163
1159
1164 def normalize(lines):
1160 def normalize(lines):
1165 nlines = []
1161 nlines = []
1166 for line in lines:
1162 for line in lines:
1167 if line.endswith('\r\n'):
1163 if line.endswith('\r\n'):
1168 line = line[:-2] + '\n'
1164 line = line[:-2] + '\n'
1169 nlines.append(line)
1165 nlines.append(line)
1170 return nlines
1166 return nlines
1171
1167
1172 # Dummy object, it is rebuilt manually
1168 # Dummy object, it is rebuilt manually
1173 nh = hunk(self.desc, self.number, None, None)
1169 nh = hunk(self.desc, self.number, None, None)
1174 nh.number = self.number
1170 nh.number = self.number
1175 nh.desc = self.desc
1171 nh.desc = self.desc
1176 nh.hunk = self.hunk
1172 nh.hunk = self.hunk
1177 nh.a = normalize(self.a)
1173 nh.a = normalize(self.a)
1178 nh.b = normalize(self.b)
1174 nh.b = normalize(self.b)
1179 nh.starta = self.starta
1175 nh.starta = self.starta
1180 nh.startb = self.startb
1176 nh.startb = self.startb
1181 nh.lena = self.lena
1177 nh.lena = self.lena
1182 nh.lenb = self.lenb
1178 nh.lenb = self.lenb
1183 return nh
1179 return nh
1184
1180
1185 def read_unified_hunk(self, lr):
1181 def read_unified_hunk(self, lr):
1186 m = unidesc.match(self.desc)
1182 m = unidesc.match(self.desc)
1187 if not m:
1183 if not m:
1188 raise PatchError(_("bad hunk #%d") % self.number)
1184 raise PatchError(_("bad hunk #%d") % self.number)
1189 self.starta, self.lena, self.startb, self.lenb = m.groups()
1185 self.starta, self.lena, self.startb, self.lenb = m.groups()
1190 if self.lena is None:
1186 if self.lena is None:
1191 self.lena = 1
1187 self.lena = 1
1192 else:
1188 else:
1193 self.lena = int(self.lena)
1189 self.lena = int(self.lena)
1194 if self.lenb is None:
1190 if self.lenb is None:
1195 self.lenb = 1
1191 self.lenb = 1
1196 else:
1192 else:
1197 self.lenb = int(self.lenb)
1193 self.lenb = int(self.lenb)
1198 self.starta = int(self.starta)
1194 self.starta = int(self.starta)
1199 self.startb = int(self.startb)
1195 self.startb = int(self.startb)
1200 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1196 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1201 self.b)
1197 self.b)
1202 # if we hit eof before finishing out the hunk, the last line will
1198 # if we hit eof before finishing out the hunk, the last line will
1203 # be zero length. Lets try to fix it up.
1199 # be zero length. Lets try to fix it up.
1204 while len(self.hunk[-1]) == 0:
1200 while len(self.hunk[-1]) == 0:
1205 del self.hunk[-1]
1201 del self.hunk[-1]
1206 del self.a[-1]
1202 del self.a[-1]
1207 del self.b[-1]
1203 del self.b[-1]
1208 self.lena -= 1
1204 self.lena -= 1
1209 self.lenb -= 1
1205 self.lenb -= 1
1210 self._fixnewline(lr)
1206 self._fixnewline(lr)
1211
1207
1212 def read_context_hunk(self, lr):
1208 def read_context_hunk(self, lr):
1213 self.desc = lr.readline()
1209 self.desc = lr.readline()
1214 m = contextdesc.match(self.desc)
1210 m = contextdesc.match(self.desc)
1215 if not m:
1211 if not m:
1216 raise PatchError(_("bad hunk #%d") % self.number)
1212 raise PatchError(_("bad hunk #%d") % self.number)
1217 self.starta, aend = m.groups()
1213 self.starta, aend = m.groups()
1218 self.starta = int(self.starta)
1214 self.starta = int(self.starta)
1219 if aend is None:
1215 if aend is None:
1220 aend = self.starta
1216 aend = self.starta
1221 self.lena = int(aend) - self.starta
1217 self.lena = int(aend) - self.starta
1222 if self.starta:
1218 if self.starta:
1223 self.lena += 1
1219 self.lena += 1
1224 for x in xrange(self.lena):
1220 for x in xrange(self.lena):
1225 l = lr.readline()
1221 l = lr.readline()
1226 if l.startswith('---'):
1222 if l.startswith('---'):
1227 # lines addition, old block is empty
1223 # lines addition, old block is empty
1228 lr.push(l)
1224 lr.push(l)
1229 break
1225 break
1230 s = l[2:]
1226 s = l[2:]
1231 if l.startswith('- ') or l.startswith('! '):
1227 if l.startswith('- ') or l.startswith('! '):
1232 u = '-' + s
1228 u = '-' + s
1233 elif l.startswith(' '):
1229 elif l.startswith(' '):
1234 u = ' ' + s
1230 u = ' ' + s
1235 else:
1231 else:
1236 raise PatchError(_("bad hunk #%d old text line %d") %
1232 raise PatchError(_("bad hunk #%d old text line %d") %
1237 (self.number, x))
1233 (self.number, x))
1238 self.a.append(u)
1234 self.a.append(u)
1239 self.hunk.append(u)
1235 self.hunk.append(u)
1240
1236
1241 l = lr.readline()
1237 l = lr.readline()
1242 if l.startswith('\ '):
1238 if l.startswith('\ '):
1243 s = self.a[-1][:-1]
1239 s = self.a[-1][:-1]
1244 self.a[-1] = s
1240 self.a[-1] = s
1245 self.hunk[-1] = s
1241 self.hunk[-1] = s
1246 l = lr.readline()
1242 l = lr.readline()
1247 m = contextdesc.match(l)
1243 m = contextdesc.match(l)
1248 if not m:
1244 if not m:
1249 raise PatchError(_("bad hunk #%d") % self.number)
1245 raise PatchError(_("bad hunk #%d") % self.number)
1250 self.startb, bend = m.groups()
1246 self.startb, bend = m.groups()
1251 self.startb = int(self.startb)
1247 self.startb = int(self.startb)
1252 if bend is None:
1248 if bend is None:
1253 bend = self.startb
1249 bend = self.startb
1254 self.lenb = int(bend) - self.startb
1250 self.lenb = int(bend) - self.startb
1255 if self.startb:
1251 if self.startb:
1256 self.lenb += 1
1252 self.lenb += 1
1257 hunki = 1
1253 hunki = 1
1258 for x in xrange(self.lenb):
1254 for x in xrange(self.lenb):
1259 l = lr.readline()
1255 l = lr.readline()
1260 if l.startswith('\ '):
1256 if l.startswith('\ '):
1261 # XXX: the only way to hit this is with an invalid line range.
1257 # XXX: the only way to hit this is with an invalid line range.
1262 # The no-eol marker is not counted in the line range, but I
1258 # The no-eol marker is not counted in the line range, but I
1263 # guess there are diff(1) out there which behave differently.
1259 # guess there are diff(1) out there which behave differently.
1264 s = self.b[-1][:-1]
1260 s = self.b[-1][:-1]
1265 self.b[-1] = s
1261 self.b[-1] = s
1266 self.hunk[hunki - 1] = s
1262 self.hunk[hunki - 1] = s
1267 continue
1263 continue
1268 if not l:
1264 if not l:
1269 # line deletions, new block is empty and we hit EOF
1265 # line deletions, new block is empty and we hit EOF
1270 lr.push(l)
1266 lr.push(l)
1271 break
1267 break
1272 s = l[2:]
1268 s = l[2:]
1273 if l.startswith('+ ') or l.startswith('! '):
1269 if l.startswith('+ ') or l.startswith('! '):
1274 u = '+' + s
1270 u = '+' + s
1275 elif l.startswith(' '):
1271 elif l.startswith(' '):
1276 u = ' ' + s
1272 u = ' ' + s
1277 elif len(self.b) == 0:
1273 elif len(self.b) == 0:
1278 # line deletions, new block is empty
1274 # line deletions, new block is empty
1279 lr.push(l)
1275 lr.push(l)
1280 break
1276 break
1281 else:
1277 else:
1282 raise PatchError(_("bad hunk #%d old text line %d") %
1278 raise PatchError(_("bad hunk #%d old text line %d") %
1283 (self.number, x))
1279 (self.number, x))
1284 self.b.append(s)
1280 self.b.append(s)
1285 while True:
1281 while True:
1286 if hunki >= len(self.hunk):
1282 if hunki >= len(self.hunk):
1287 h = ""
1283 h = ""
1288 else:
1284 else:
1289 h = self.hunk[hunki]
1285 h = self.hunk[hunki]
1290 hunki += 1
1286 hunki += 1
1291 if h == u:
1287 if h == u:
1292 break
1288 break
1293 elif h.startswith('-'):
1289 elif h.startswith('-'):
1294 continue
1290 continue
1295 else:
1291 else:
1296 self.hunk.insert(hunki - 1, u)
1292 self.hunk.insert(hunki - 1, u)
1297 break
1293 break
1298
1294
1299 if not self.a:
1295 if not self.a:
1300 # this happens when lines were only added to the hunk
1296 # this happens when lines were only added to the hunk
1301 for x in self.hunk:
1297 for x in self.hunk:
1302 if x.startswith('-') or x.startswith(' '):
1298 if x.startswith('-') or x.startswith(' '):
1303 self.a.append(x)
1299 self.a.append(x)
1304 if not self.b:
1300 if not self.b:
1305 # this happens when lines were only deleted from the hunk
1301 # this happens when lines were only deleted from the hunk
1306 for x in self.hunk:
1302 for x in self.hunk:
1307 if x.startswith('+') or x.startswith(' '):
1303 if x.startswith('+') or x.startswith(' '):
1308 self.b.append(x[1:])
1304 self.b.append(x[1:])
1309 # @@ -start,len +start,len @@
1305 # @@ -start,len +start,len @@
1310 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1306 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1311 self.startb, self.lenb)
1307 self.startb, self.lenb)
1312 self.hunk[0] = self.desc
1308 self.hunk[0] = self.desc
1313 self._fixnewline(lr)
1309 self._fixnewline(lr)
1314
1310
1315 def _fixnewline(self, lr):
1311 def _fixnewline(self, lr):
1316 l = lr.readline()
1312 l = lr.readline()
1317 if l.startswith('\ '):
1313 if l.startswith('\ '):
1318 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1314 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1319 else:
1315 else:
1320 lr.push(l)
1316 lr.push(l)
1321
1317
1322 def complete(self):
1318 def complete(self):
1323 return len(self.a) == self.lena and len(self.b) == self.lenb
1319 return len(self.a) == self.lena and len(self.b) == self.lenb
1324
1320
1325 def _fuzzit(self, old, new, fuzz, toponly):
1321 def _fuzzit(self, old, new, fuzz, toponly):
1326 # this removes context lines from the top and bottom of list 'l'. It
1322 # this removes context lines from the top and bottom of list 'l'. It
1327 # checks the hunk to make sure only context lines are removed, and then
1323 # checks the hunk to make sure only context lines are removed, and then
1328 # returns a new shortened list of lines.
1324 # returns a new shortened list of lines.
1329 fuzz = min(fuzz, len(old))
1325 fuzz = min(fuzz, len(old))
1330 if fuzz:
1326 if fuzz:
1331 top = 0
1327 top = 0
1332 bot = 0
1328 bot = 0
1333 hlen = len(self.hunk)
1329 hlen = len(self.hunk)
1334 for x in xrange(hlen - 1):
1330 for x in xrange(hlen - 1):
1335 # the hunk starts with the @@ line, so use x+1
1331 # the hunk starts with the @@ line, so use x+1
1336 if self.hunk[x + 1][0] == ' ':
1332 if self.hunk[x + 1][0] == ' ':
1337 top += 1
1333 top += 1
1338 else:
1334 else:
1339 break
1335 break
1340 if not toponly:
1336 if not toponly:
1341 for x in xrange(hlen - 1):
1337 for x in xrange(hlen - 1):
1342 if self.hunk[hlen - bot - 1][0] == ' ':
1338 if self.hunk[hlen - bot - 1][0] == ' ':
1343 bot += 1
1339 bot += 1
1344 else:
1340 else:
1345 break
1341 break
1346
1342
1347 bot = min(fuzz, bot)
1343 bot = min(fuzz, bot)
1348 top = min(fuzz, top)
1344 top = min(fuzz, top)
1349 return old[top:len(old) - bot], new[top:len(new) - bot], top
1345 return old[top:len(old) - bot], new[top:len(new) - bot], top
1350 return old, new, 0
1346 return old, new, 0
1351
1347
1352 def fuzzit(self, fuzz, toponly):
1348 def fuzzit(self, fuzz, toponly):
1353 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1349 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1354 oldstart = self.starta + top
1350 oldstart = self.starta + top
1355 newstart = self.startb + top
1351 newstart = self.startb + top
1356 # zero length hunk ranges already have their start decremented
1352 # zero length hunk ranges already have their start decremented
1357 if self.lena and oldstart > 0:
1353 if self.lena and oldstart > 0:
1358 oldstart -= 1
1354 oldstart -= 1
1359 if self.lenb and newstart > 0:
1355 if self.lenb and newstart > 0:
1360 newstart -= 1
1356 newstart -= 1
1361 return old, oldstart, new, newstart
1357 return old, oldstart, new, newstart
1362
1358
1363 class binhunk(object):
1359 class binhunk(object):
1364 'A binary patch file.'
1360 'A binary patch file.'
1365 def __init__(self, lr, fname):
1361 def __init__(self, lr, fname):
1366 self.text = None
1362 self.text = None
1367 self.delta = False
1363 self.delta = False
1368 self.hunk = ['GIT binary patch\n']
1364 self.hunk = ['GIT binary patch\n']
1369 self._fname = fname
1365 self._fname = fname
1370 self._read(lr)
1366 self._read(lr)
1371
1367
1372 def complete(self):
1368 def complete(self):
1373 return self.text is not None
1369 return self.text is not None
1374
1370
1375 def new(self, lines):
1371 def new(self, lines):
1376 if self.delta:
1372 if self.delta:
1377 return [applybindelta(self.text, ''.join(lines))]
1373 return [applybindelta(self.text, ''.join(lines))]
1378 return [self.text]
1374 return [self.text]
1379
1375
1380 def _read(self, lr):
1376 def _read(self, lr):
1381 def getline(lr, hunk):
1377 def getline(lr, hunk):
1382 l = lr.readline()
1378 l = lr.readline()
1383 hunk.append(l)
1379 hunk.append(l)
1384 return l.rstrip('\r\n')
1380 return l.rstrip('\r\n')
1385
1381
1386 size = 0
1382 size = 0
1387 while True:
1383 while True:
1388 line = getline(lr, self.hunk)
1384 line = getline(lr, self.hunk)
1389 if not line:
1385 if not line:
1390 raise PatchError(_('could not extract "%s" binary data')
1386 raise PatchError(_('could not extract "%s" binary data')
1391 % self._fname)
1387 % self._fname)
1392 if line.startswith('literal '):
1388 if line.startswith('literal '):
1393 size = int(line[8:].rstrip())
1389 size = int(line[8:].rstrip())
1394 break
1390 break
1395 if line.startswith('delta '):
1391 if line.startswith('delta '):
1396 size = int(line[6:].rstrip())
1392 size = int(line[6:].rstrip())
1397 self.delta = True
1393 self.delta = True
1398 break
1394 break
1399 dec = []
1395 dec = []
1400 line = getline(lr, self.hunk)
1396 line = getline(lr, self.hunk)
1401 while len(line) > 1:
1397 while len(line) > 1:
1402 l = line[0]
1398 l = line[0]
1403 if l <= 'Z' and l >= 'A':
1399 if l <= 'Z' and l >= 'A':
1404 l = ord(l) - ord('A') + 1
1400 l = ord(l) - ord('A') + 1
1405 else:
1401 else:
1406 l = ord(l) - ord('a') + 27
1402 l = ord(l) - ord('a') + 27
1407 try:
1403 try:
1408 dec.append(base85.b85decode(line[1:])[:l])
1404 dec.append(base85.b85decode(line[1:])[:l])
1409 except ValueError as e:
1405 except ValueError as e:
1410 raise PatchError(_('could not decode "%s" binary patch: %s')
1406 raise PatchError(_('could not decode "%s" binary patch: %s')
1411 % (self._fname, str(e)))
1407 % (self._fname, str(e)))
1412 line = getline(lr, self.hunk)
1408 line = getline(lr, self.hunk)
1413 text = zlib.decompress(''.join(dec))
1409 text = zlib.decompress(''.join(dec))
1414 if len(text) != size:
1410 if len(text) != size:
1415 raise PatchError(_('"%s" length is %d bytes, should be %d')
1411 raise PatchError(_('"%s" length is %d bytes, should be %d')
1416 % (self._fname, len(text), size))
1412 % (self._fname, len(text), size))
1417 self.text = text
1413 self.text = text
1418
1414
1419 def parsefilename(str):
1415 def parsefilename(str):
1420 # --- filename \t|space stuff
1416 # --- filename \t|space stuff
1421 s = str[4:].rstrip('\r\n')
1417 s = str[4:].rstrip('\r\n')
1422 i = s.find('\t')
1418 i = s.find('\t')
1423 if i < 0:
1419 if i < 0:
1424 i = s.find(' ')
1420 i = s.find(' ')
1425 if i < 0:
1421 if i < 0:
1426 return s
1422 return s
1427 return s[:i]
1423 return s[:i]
1428
1424
1429 def reversehunks(hunks):
1425 def reversehunks(hunks):
1430 '''reverse the signs in the hunks given as argument
1426 '''reverse the signs in the hunks given as argument
1431
1427
1432 This function operates on hunks coming out of patch.filterpatch, that is
1428 This function operates on hunks coming out of patch.filterpatch, that is
1433 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1429 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1434
1430
1435 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1431 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1436 ... --- a/folder1/g
1432 ... --- a/folder1/g
1437 ... +++ b/folder1/g
1433 ... +++ b/folder1/g
1438 ... @@ -1,7 +1,7 @@
1434 ... @@ -1,7 +1,7 @@
1439 ... +firstline
1435 ... +firstline
1440 ... c
1436 ... c
1441 ... 1
1437 ... 1
1442 ... 2
1438 ... 2
1443 ... + 3
1439 ... + 3
1444 ... -4
1440 ... -4
1445 ... 5
1441 ... 5
1446 ... d
1442 ... d
1447 ... +lastline"""
1443 ... +lastline"""
1448 >>> hunks = parsepatch(rawpatch)
1444 >>> hunks = parsepatch(rawpatch)
1449 >>> hunkscomingfromfilterpatch = []
1445 >>> hunkscomingfromfilterpatch = []
1450 >>> for h in hunks:
1446 >>> for h in hunks:
1451 ... hunkscomingfromfilterpatch.append(h)
1447 ... hunkscomingfromfilterpatch.append(h)
1452 ... hunkscomingfromfilterpatch.extend(h.hunks)
1448 ... hunkscomingfromfilterpatch.extend(h.hunks)
1453
1449
1454 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1450 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1455 >>> from . import util
1451 >>> from . import util
1456 >>> fp = util.stringio()
1452 >>> fp = util.stringio()
1457 >>> for c in reversedhunks:
1453 >>> for c in reversedhunks:
1458 ... c.write(fp)
1454 ... c.write(fp)
1459 >>> fp.seek(0)
1455 >>> fp.seek(0)
1460 >>> reversedpatch = fp.read()
1456 >>> reversedpatch = fp.read()
1461 >>> print reversedpatch
1457 >>> print reversedpatch
1462 diff --git a/folder1/g b/folder1/g
1458 diff --git a/folder1/g b/folder1/g
1463 --- a/folder1/g
1459 --- a/folder1/g
1464 +++ b/folder1/g
1460 +++ b/folder1/g
1465 @@ -1,4 +1,3 @@
1461 @@ -1,4 +1,3 @@
1466 -firstline
1462 -firstline
1467 c
1463 c
1468 1
1464 1
1469 2
1465 2
1470 @@ -1,6 +2,6 @@
1466 @@ -1,6 +2,6 @@
1471 c
1467 c
1472 1
1468 1
1473 2
1469 2
1474 - 3
1470 - 3
1475 +4
1471 +4
1476 5
1472 5
1477 d
1473 d
1478 @@ -5,3 +6,2 @@
1474 @@ -5,3 +6,2 @@
1479 5
1475 5
1480 d
1476 d
1481 -lastline
1477 -lastline
1482
1478
1483 '''
1479 '''
1484
1480
1485 from . import crecord as crecordmod
1481 from . import crecord as crecordmod
1486 newhunks = []
1482 newhunks = []
1487 for c in hunks:
1483 for c in hunks:
1488 if isinstance(c, crecordmod.uihunk):
1484 if isinstance(c, crecordmod.uihunk):
1489 # curses hunks encapsulate the record hunk in _hunk
1485 # curses hunks encapsulate the record hunk in _hunk
1490 c = c._hunk
1486 c = c._hunk
1491 if isinstance(c, recordhunk):
1487 if isinstance(c, recordhunk):
1492 for j, line in enumerate(c.hunk):
1488 for j, line in enumerate(c.hunk):
1493 if line.startswith("-"):
1489 if line.startswith("-"):
1494 c.hunk[j] = "+" + c.hunk[j][1:]
1490 c.hunk[j] = "+" + c.hunk[j][1:]
1495 elif line.startswith("+"):
1491 elif line.startswith("+"):
1496 c.hunk[j] = "-" + c.hunk[j][1:]
1492 c.hunk[j] = "-" + c.hunk[j][1:]
1497 c.added, c.removed = c.removed, c.added
1493 c.added, c.removed = c.removed, c.added
1498 newhunks.append(c)
1494 newhunks.append(c)
1499 return newhunks
1495 return newhunks
1500
1496
1501 def parsepatch(originalchunks):
1497 def parsepatch(originalchunks):
1502 """patch -> [] of headers -> [] of hunks """
1498 """patch -> [] of headers -> [] of hunks """
1503 class parser(object):
1499 class parser(object):
1504 """patch parsing state machine"""
1500 """patch parsing state machine"""
1505 def __init__(self):
1501 def __init__(self):
1506 self.fromline = 0
1502 self.fromline = 0
1507 self.toline = 0
1503 self.toline = 0
1508 self.proc = ''
1504 self.proc = ''
1509 self.header = None
1505 self.header = None
1510 self.context = []
1506 self.context = []
1511 self.before = []
1507 self.before = []
1512 self.hunk = []
1508 self.hunk = []
1513 self.headers = []
1509 self.headers = []
1514
1510
1515 def addrange(self, limits):
1511 def addrange(self, limits):
1516 fromstart, fromend, tostart, toend, proc = limits
1512 fromstart, fromend, tostart, toend, proc = limits
1517 self.fromline = int(fromstart)
1513 self.fromline = int(fromstart)
1518 self.toline = int(tostart)
1514 self.toline = int(tostart)
1519 self.proc = proc
1515 self.proc = proc
1520
1516
1521 def addcontext(self, context):
1517 def addcontext(self, context):
1522 if self.hunk:
1518 if self.hunk:
1523 h = recordhunk(self.header, self.fromline, self.toline,
1519 h = recordhunk(self.header, self.fromline, self.toline,
1524 self.proc, self.before, self.hunk, context)
1520 self.proc, self.before, self.hunk, context)
1525 self.header.hunks.append(h)
1521 self.header.hunks.append(h)
1526 self.fromline += len(self.before) + h.removed
1522 self.fromline += len(self.before) + h.removed
1527 self.toline += len(self.before) + h.added
1523 self.toline += len(self.before) + h.added
1528 self.before = []
1524 self.before = []
1529 self.hunk = []
1525 self.hunk = []
1530 self.context = context
1526 self.context = context
1531
1527
1532 def addhunk(self, hunk):
1528 def addhunk(self, hunk):
1533 if self.context:
1529 if self.context:
1534 self.before = self.context
1530 self.before = self.context
1535 self.context = []
1531 self.context = []
1536 self.hunk = hunk
1532 self.hunk = hunk
1537
1533
1538 def newfile(self, hdr):
1534 def newfile(self, hdr):
1539 self.addcontext([])
1535 self.addcontext([])
1540 h = header(hdr)
1536 h = header(hdr)
1541 self.headers.append(h)
1537 self.headers.append(h)
1542 self.header = h
1538 self.header = h
1543
1539
1544 def addother(self, line):
1540 def addother(self, line):
1545 pass # 'other' lines are ignored
1541 pass # 'other' lines are ignored
1546
1542
1547 def finished(self):
1543 def finished(self):
1548 self.addcontext([])
1544 self.addcontext([])
1549 return self.headers
1545 return self.headers
1550
1546
1551 transitions = {
1547 transitions = {
1552 'file': {'context': addcontext,
1548 'file': {'context': addcontext,
1553 'file': newfile,
1549 'file': newfile,
1554 'hunk': addhunk,
1550 'hunk': addhunk,
1555 'range': addrange},
1551 'range': addrange},
1556 'context': {'file': newfile,
1552 'context': {'file': newfile,
1557 'hunk': addhunk,
1553 'hunk': addhunk,
1558 'range': addrange,
1554 'range': addrange,
1559 'other': addother},
1555 'other': addother},
1560 'hunk': {'context': addcontext,
1556 'hunk': {'context': addcontext,
1561 'file': newfile,
1557 'file': newfile,
1562 'range': addrange},
1558 'range': addrange},
1563 'range': {'context': addcontext,
1559 'range': {'context': addcontext,
1564 'hunk': addhunk},
1560 'hunk': addhunk},
1565 'other': {'other': addother},
1561 'other': {'other': addother},
1566 }
1562 }
1567
1563
1568 p = parser()
1564 p = parser()
1569 fp = stringio()
1565 fp = stringio()
1570 fp.write(''.join(originalchunks))
1566 fp.write(''.join(originalchunks))
1571 fp.seek(0)
1567 fp.seek(0)
1572
1568
1573 state = 'context'
1569 state = 'context'
1574 for newstate, data in scanpatch(fp):
1570 for newstate, data in scanpatch(fp):
1575 try:
1571 try:
1576 p.transitions[state][newstate](p, data)
1572 p.transitions[state][newstate](p, data)
1577 except KeyError:
1573 except KeyError:
1578 raise PatchError('unhandled transition: %s -> %s' %
1574 raise PatchError('unhandled transition: %s -> %s' %
1579 (state, newstate))
1575 (state, newstate))
1580 state = newstate
1576 state = newstate
1581 del fp
1577 del fp
1582 return p.finished()
1578 return p.finished()
1583
1579
1584 def pathtransform(path, strip, prefix):
1580 def pathtransform(path, strip, prefix):
1585 '''turn a path from a patch into a path suitable for the repository
1581 '''turn a path from a patch into a path suitable for the repository
1586
1582
1587 prefix, if not empty, is expected to be normalized with a / at the end.
1583 prefix, if not empty, is expected to be normalized with a / at the end.
1588
1584
1589 Returns (stripped components, path in repository).
1585 Returns (stripped components, path in repository).
1590
1586
1591 >>> pathtransform('a/b/c', 0, '')
1587 >>> pathtransform('a/b/c', 0, '')
1592 ('', 'a/b/c')
1588 ('', 'a/b/c')
1593 >>> pathtransform(' a/b/c ', 0, '')
1589 >>> pathtransform(' a/b/c ', 0, '')
1594 ('', ' a/b/c')
1590 ('', ' a/b/c')
1595 >>> pathtransform(' a/b/c ', 2, '')
1591 >>> pathtransform(' a/b/c ', 2, '')
1596 ('a/b/', 'c')
1592 ('a/b/', 'c')
1597 >>> pathtransform('a/b/c', 0, 'd/e/')
1593 >>> pathtransform('a/b/c', 0, 'd/e/')
1598 ('', 'd/e/a/b/c')
1594 ('', 'd/e/a/b/c')
1599 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1595 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1600 ('a//b/', 'd/e/c')
1596 ('a//b/', 'd/e/c')
1601 >>> pathtransform('a/b/c', 3, '')
1597 >>> pathtransform('a/b/c', 3, '')
1602 Traceback (most recent call last):
1598 Traceback (most recent call last):
1603 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1599 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1604 '''
1600 '''
1605 pathlen = len(path)
1601 pathlen = len(path)
1606 i = 0
1602 i = 0
1607 if strip == 0:
1603 if strip == 0:
1608 return '', prefix + path.rstrip()
1604 return '', prefix + path.rstrip()
1609 count = strip
1605 count = strip
1610 while count > 0:
1606 while count > 0:
1611 i = path.find('/', i)
1607 i = path.find('/', i)
1612 if i == -1:
1608 if i == -1:
1613 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1609 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1614 (count, strip, path))
1610 (count, strip, path))
1615 i += 1
1611 i += 1
1616 # consume '//' in the path
1612 # consume '//' in the path
1617 while i < pathlen - 1 and path[i] == '/':
1613 while i < pathlen - 1 and path[i] == '/':
1618 i += 1
1614 i += 1
1619 count -= 1
1615 count -= 1
1620 return path[:i].lstrip(), prefix + path[i:].rstrip()
1616 return path[:i].lstrip(), prefix + path[i:].rstrip()
1621
1617
1622 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1618 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1623 nulla = afile_orig == "/dev/null"
1619 nulla = afile_orig == "/dev/null"
1624 nullb = bfile_orig == "/dev/null"
1620 nullb = bfile_orig == "/dev/null"
1625 create = nulla and hunk.starta == 0 and hunk.lena == 0
1621 create = nulla and hunk.starta == 0 and hunk.lena == 0
1626 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1622 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1627 abase, afile = pathtransform(afile_orig, strip, prefix)
1623 abase, afile = pathtransform(afile_orig, strip, prefix)
1628 gooda = not nulla and backend.exists(afile)
1624 gooda = not nulla and backend.exists(afile)
1629 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1625 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1630 if afile == bfile:
1626 if afile == bfile:
1631 goodb = gooda
1627 goodb = gooda
1632 else:
1628 else:
1633 goodb = not nullb and backend.exists(bfile)
1629 goodb = not nullb and backend.exists(bfile)
1634 missing = not goodb and not gooda and not create
1630 missing = not goodb and not gooda and not create
1635
1631
1636 # some diff programs apparently produce patches where the afile is
1632 # some diff programs apparently produce patches where the afile is
1637 # not /dev/null, but afile starts with bfile
1633 # not /dev/null, but afile starts with bfile
1638 abasedir = afile[:afile.rfind('/') + 1]
1634 abasedir = afile[:afile.rfind('/') + 1]
1639 bbasedir = bfile[:bfile.rfind('/') + 1]
1635 bbasedir = bfile[:bfile.rfind('/') + 1]
1640 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1636 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1641 and hunk.starta == 0 and hunk.lena == 0):
1637 and hunk.starta == 0 and hunk.lena == 0):
1642 create = True
1638 create = True
1643 missing = False
1639 missing = False
1644
1640
1645 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1641 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1646 # diff is between a file and its backup. In this case, the original
1642 # diff is between a file and its backup. In this case, the original
1647 # file should be patched (see original mpatch code).
1643 # file should be patched (see original mpatch code).
1648 isbackup = (abase == bbase and bfile.startswith(afile))
1644 isbackup = (abase == bbase and bfile.startswith(afile))
1649 fname = None
1645 fname = None
1650 if not missing:
1646 if not missing:
1651 if gooda and goodb:
1647 if gooda and goodb:
1652 if isbackup:
1648 if isbackup:
1653 fname = afile
1649 fname = afile
1654 else:
1650 else:
1655 fname = bfile
1651 fname = bfile
1656 elif gooda:
1652 elif gooda:
1657 fname = afile
1653 fname = afile
1658
1654
1659 if not fname:
1655 if not fname:
1660 if not nullb:
1656 if not nullb:
1661 if isbackup:
1657 if isbackup:
1662 fname = afile
1658 fname = afile
1663 else:
1659 else:
1664 fname = bfile
1660 fname = bfile
1665 elif not nulla:
1661 elif not nulla:
1666 fname = afile
1662 fname = afile
1667 else:
1663 else:
1668 raise PatchError(_("undefined source and destination files"))
1664 raise PatchError(_("undefined source and destination files"))
1669
1665
1670 gp = patchmeta(fname)
1666 gp = patchmeta(fname)
1671 if create:
1667 if create:
1672 gp.op = 'ADD'
1668 gp.op = 'ADD'
1673 elif remove:
1669 elif remove:
1674 gp.op = 'DELETE'
1670 gp.op = 'DELETE'
1675 return gp
1671 return gp
1676
1672
1677 def scanpatch(fp):
1673 def scanpatch(fp):
1678 """like patch.iterhunks, but yield different events
1674 """like patch.iterhunks, but yield different events
1679
1675
1680 - ('file', [header_lines + fromfile + tofile])
1676 - ('file', [header_lines + fromfile + tofile])
1681 - ('context', [context_lines])
1677 - ('context', [context_lines])
1682 - ('hunk', [hunk_lines])
1678 - ('hunk', [hunk_lines])
1683 - ('range', (-start,len, +start,len, proc))
1679 - ('range', (-start,len, +start,len, proc))
1684 """
1680 """
1685 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1681 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1686 lr = linereader(fp)
1682 lr = linereader(fp)
1687
1683
1688 def scanwhile(first, p):
1684 def scanwhile(first, p):
1689 """scan lr while predicate holds"""
1685 """scan lr while predicate holds"""
1690 lines = [first]
1686 lines = [first]
1691 while True:
1687 for line in iter(lr.readline, ''):
1692 line = lr.readline()
1693 if not line:
1694 break
1695 if p(line):
1688 if p(line):
1696 lines.append(line)
1689 lines.append(line)
1697 else:
1690 else:
1698 lr.push(line)
1691 lr.push(line)
1699 break
1692 break
1700 return lines
1693 return lines
1701
1694
1702 while True:
1695 for line in iter(lr.readline, ''):
1703 line = lr.readline()
1704 if not line:
1705 break
1706 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1696 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1707 def notheader(line):
1697 def notheader(line):
1708 s = line.split(None, 1)
1698 s = line.split(None, 1)
1709 return not s or s[0] not in ('---', 'diff')
1699 return not s or s[0] not in ('---', 'diff')
1710 header = scanwhile(line, notheader)
1700 header = scanwhile(line, notheader)
1711 fromfile = lr.readline()
1701 fromfile = lr.readline()
1712 if fromfile.startswith('---'):
1702 if fromfile.startswith('---'):
1713 tofile = lr.readline()
1703 tofile = lr.readline()
1714 header += [fromfile, tofile]
1704 header += [fromfile, tofile]
1715 else:
1705 else:
1716 lr.push(fromfile)
1706 lr.push(fromfile)
1717 yield 'file', header
1707 yield 'file', header
1718 elif line[0] == ' ':
1708 elif line[0] == ' ':
1719 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1709 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1720 elif line[0] in '-+':
1710 elif line[0] in '-+':
1721 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1711 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1722 else:
1712 else:
1723 m = lines_re.match(line)
1713 m = lines_re.match(line)
1724 if m:
1714 if m:
1725 yield 'range', m.groups()
1715 yield 'range', m.groups()
1726 else:
1716 else:
1727 yield 'other', line
1717 yield 'other', line
1728
1718
1729 def scangitpatch(lr, firstline):
1719 def scangitpatch(lr, firstline):
1730 """
1720 """
1731 Git patches can emit:
1721 Git patches can emit:
1732 - rename a to b
1722 - rename a to b
1733 - change b
1723 - change b
1734 - copy a to c
1724 - copy a to c
1735 - change c
1725 - change c
1736
1726
1737 We cannot apply this sequence as-is, the renamed 'a' could not be
1727 We cannot apply this sequence as-is, the renamed 'a' could not be
1738 found for it would have been renamed already. And we cannot copy
1728 found for it would have been renamed already. And we cannot copy
1739 from 'b' instead because 'b' would have been changed already. So
1729 from 'b' instead because 'b' would have been changed already. So
1740 we scan the git patch for copy and rename commands so we can
1730 we scan the git patch for copy and rename commands so we can
1741 perform the copies ahead of time.
1731 perform the copies ahead of time.
1742 """
1732 """
1743 pos = 0
1733 pos = 0
1744 try:
1734 try:
1745 pos = lr.fp.tell()
1735 pos = lr.fp.tell()
1746 fp = lr.fp
1736 fp = lr.fp
1747 except IOError:
1737 except IOError:
1748 fp = stringio(lr.fp.read())
1738 fp = stringio(lr.fp.read())
1749 gitlr = linereader(fp)
1739 gitlr = linereader(fp)
1750 gitlr.push(firstline)
1740 gitlr.push(firstline)
1751 gitpatches = readgitpatch(gitlr)
1741 gitpatches = readgitpatch(gitlr)
1752 fp.seek(pos)
1742 fp.seek(pos)
1753 return gitpatches
1743 return gitpatches
1754
1744
1755 def iterhunks(fp):
1745 def iterhunks(fp):
1756 """Read a patch and yield the following events:
1746 """Read a patch and yield the following events:
1757 - ("file", afile, bfile, firsthunk): select a new target file.
1747 - ("file", afile, bfile, firsthunk): select a new target file.
1758 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1748 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1759 "file" event.
1749 "file" event.
1760 - ("git", gitchanges): current diff is in git format, gitchanges
1750 - ("git", gitchanges): current diff is in git format, gitchanges
1761 maps filenames to gitpatch records. Unique event.
1751 maps filenames to gitpatch records. Unique event.
1762 """
1752 """
1763 afile = ""
1753 afile = ""
1764 bfile = ""
1754 bfile = ""
1765 state = None
1755 state = None
1766 hunknum = 0
1756 hunknum = 0
1767 emitfile = newfile = False
1757 emitfile = newfile = False
1768 gitpatches = None
1758 gitpatches = None
1769
1759
1770 # our states
1760 # our states
1771 BFILE = 1
1761 BFILE = 1
1772 context = None
1762 context = None
1773 lr = linereader(fp)
1763 lr = linereader(fp)
1774
1764
1775 while True:
1765 for x in iter(lr.readline, ''):
1776 x = lr.readline()
1777 if not x:
1778 break
1779 if state == BFILE and (
1766 if state == BFILE and (
1780 (not context and x[0] == '@')
1767 (not context and x[0] == '@')
1781 or (context is not False and x.startswith('***************'))
1768 or (context is not False and x.startswith('***************'))
1782 or x.startswith('GIT binary patch')):
1769 or x.startswith('GIT binary patch')):
1783 gp = None
1770 gp = None
1784 if (gitpatches and
1771 if (gitpatches and
1785 gitpatches[-1].ispatching(afile, bfile)):
1772 gitpatches[-1].ispatching(afile, bfile)):
1786 gp = gitpatches.pop()
1773 gp = gitpatches.pop()
1787 if x.startswith('GIT binary patch'):
1774 if x.startswith('GIT binary patch'):
1788 h = binhunk(lr, gp.path)
1775 h = binhunk(lr, gp.path)
1789 else:
1776 else:
1790 if context is None and x.startswith('***************'):
1777 if context is None and x.startswith('***************'):
1791 context = True
1778 context = True
1792 h = hunk(x, hunknum + 1, lr, context)
1779 h = hunk(x, hunknum + 1, lr, context)
1793 hunknum += 1
1780 hunknum += 1
1794 if emitfile:
1781 if emitfile:
1795 emitfile = False
1782 emitfile = False
1796 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1783 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1797 yield 'hunk', h
1784 yield 'hunk', h
1798 elif x.startswith('diff --git a/'):
1785 elif x.startswith('diff --git a/'):
1799 m = gitre.match(x.rstrip(' \r\n'))
1786 m = gitre.match(x.rstrip(' \r\n'))
1800 if not m:
1787 if not m:
1801 continue
1788 continue
1802 if gitpatches is None:
1789 if gitpatches is None:
1803 # scan whole input for git metadata
1790 # scan whole input for git metadata
1804 gitpatches = scangitpatch(lr, x)
1791 gitpatches = scangitpatch(lr, x)
1805 yield 'git', [g.copy() for g in gitpatches
1792 yield 'git', [g.copy() for g in gitpatches
1806 if g.op in ('COPY', 'RENAME')]
1793 if g.op in ('COPY', 'RENAME')]
1807 gitpatches.reverse()
1794 gitpatches.reverse()
1808 afile = 'a/' + m.group(1)
1795 afile = 'a/' + m.group(1)
1809 bfile = 'b/' + m.group(2)
1796 bfile = 'b/' + m.group(2)
1810 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1797 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1811 gp = gitpatches.pop()
1798 gp = gitpatches.pop()
1812 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1799 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1813 if not gitpatches:
1800 if not gitpatches:
1814 raise PatchError(_('failed to synchronize metadata for "%s"')
1801 raise PatchError(_('failed to synchronize metadata for "%s"')
1815 % afile[2:])
1802 % afile[2:])
1816 gp = gitpatches[-1]
1803 gp = gitpatches[-1]
1817 newfile = True
1804 newfile = True
1818 elif x.startswith('---'):
1805 elif x.startswith('---'):
1819 # check for a unified diff
1806 # check for a unified diff
1820 l2 = lr.readline()
1807 l2 = lr.readline()
1821 if not l2.startswith('+++'):
1808 if not l2.startswith('+++'):
1822 lr.push(l2)
1809 lr.push(l2)
1823 continue
1810 continue
1824 newfile = True
1811 newfile = True
1825 context = False
1812 context = False
1826 afile = parsefilename(x)
1813 afile = parsefilename(x)
1827 bfile = parsefilename(l2)
1814 bfile = parsefilename(l2)
1828 elif x.startswith('***'):
1815 elif x.startswith('***'):
1829 # check for a context diff
1816 # check for a context diff
1830 l2 = lr.readline()
1817 l2 = lr.readline()
1831 if not l2.startswith('---'):
1818 if not l2.startswith('---'):
1832 lr.push(l2)
1819 lr.push(l2)
1833 continue
1820 continue
1834 l3 = lr.readline()
1821 l3 = lr.readline()
1835 lr.push(l3)
1822 lr.push(l3)
1836 if not l3.startswith("***************"):
1823 if not l3.startswith("***************"):
1837 lr.push(l2)
1824 lr.push(l2)
1838 continue
1825 continue
1839 newfile = True
1826 newfile = True
1840 context = True
1827 context = True
1841 afile = parsefilename(x)
1828 afile = parsefilename(x)
1842 bfile = parsefilename(l2)
1829 bfile = parsefilename(l2)
1843
1830
1844 if newfile:
1831 if newfile:
1845 newfile = False
1832 newfile = False
1846 emitfile = True
1833 emitfile = True
1847 state = BFILE
1834 state = BFILE
1848 hunknum = 0
1835 hunknum = 0
1849
1836
1850 while gitpatches:
1837 while gitpatches:
1851 gp = gitpatches.pop()
1838 gp = gitpatches.pop()
1852 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1839 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1853
1840
1854 def applybindelta(binchunk, data):
1841 def applybindelta(binchunk, data):
1855 """Apply a binary delta hunk
1842 """Apply a binary delta hunk
1856 The algorithm used is the algorithm from git's patch-delta.c
1843 The algorithm used is the algorithm from git's patch-delta.c
1857 """
1844 """
1858 def deltahead(binchunk):
1845 def deltahead(binchunk):
1859 i = 0
1846 i = 0
1860 for c in binchunk:
1847 for c in binchunk:
1861 i += 1
1848 i += 1
1862 if not (ord(c) & 0x80):
1849 if not (ord(c) & 0x80):
1863 return i
1850 return i
1864 return i
1851 return i
1865 out = ""
1852 out = ""
1866 s = deltahead(binchunk)
1853 s = deltahead(binchunk)
1867 binchunk = binchunk[s:]
1854 binchunk = binchunk[s:]
1868 s = deltahead(binchunk)
1855 s = deltahead(binchunk)
1869 binchunk = binchunk[s:]
1856 binchunk = binchunk[s:]
1870 i = 0
1857 i = 0
1871 while i < len(binchunk):
1858 while i < len(binchunk):
1872 cmd = ord(binchunk[i])
1859 cmd = ord(binchunk[i])
1873 i += 1
1860 i += 1
1874 if (cmd & 0x80):
1861 if (cmd & 0x80):
1875 offset = 0
1862 offset = 0
1876 size = 0
1863 size = 0
1877 if (cmd & 0x01):
1864 if (cmd & 0x01):
1878 offset = ord(binchunk[i])
1865 offset = ord(binchunk[i])
1879 i += 1
1866 i += 1
1880 if (cmd & 0x02):
1867 if (cmd & 0x02):
1881 offset |= ord(binchunk[i]) << 8
1868 offset |= ord(binchunk[i]) << 8
1882 i += 1
1869 i += 1
1883 if (cmd & 0x04):
1870 if (cmd & 0x04):
1884 offset |= ord(binchunk[i]) << 16
1871 offset |= ord(binchunk[i]) << 16
1885 i += 1
1872 i += 1
1886 if (cmd & 0x08):
1873 if (cmd & 0x08):
1887 offset |= ord(binchunk[i]) << 24
1874 offset |= ord(binchunk[i]) << 24
1888 i += 1
1875 i += 1
1889 if (cmd & 0x10):
1876 if (cmd & 0x10):
1890 size = ord(binchunk[i])
1877 size = ord(binchunk[i])
1891 i += 1
1878 i += 1
1892 if (cmd & 0x20):
1879 if (cmd & 0x20):
1893 size |= ord(binchunk[i]) << 8
1880 size |= ord(binchunk[i]) << 8
1894 i += 1
1881 i += 1
1895 if (cmd & 0x40):
1882 if (cmd & 0x40):
1896 size |= ord(binchunk[i]) << 16
1883 size |= ord(binchunk[i]) << 16
1897 i += 1
1884 i += 1
1898 if size == 0:
1885 if size == 0:
1899 size = 0x10000
1886 size = 0x10000
1900 offset_end = offset + size
1887 offset_end = offset + size
1901 out += data[offset:offset_end]
1888 out += data[offset:offset_end]
1902 elif cmd != 0:
1889 elif cmd != 0:
1903 offset_end = i + cmd
1890 offset_end = i + cmd
1904 out += binchunk[i:offset_end]
1891 out += binchunk[i:offset_end]
1905 i += cmd
1892 i += cmd
1906 else:
1893 else:
1907 raise PatchError(_('unexpected delta opcode 0'))
1894 raise PatchError(_('unexpected delta opcode 0'))
1908 return out
1895 return out
1909
1896
1910 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1897 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1911 """Reads a patch from fp and tries to apply it.
1898 """Reads a patch from fp and tries to apply it.
1912
1899
1913 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1900 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1914 there was any fuzz.
1901 there was any fuzz.
1915
1902
1916 If 'eolmode' is 'strict', the patch content and patched file are
1903 If 'eolmode' is 'strict', the patch content and patched file are
1917 read in binary mode. Otherwise, line endings are ignored when
1904 read in binary mode. Otherwise, line endings are ignored when
1918 patching then normalized according to 'eolmode'.
1905 patching then normalized according to 'eolmode'.
1919 """
1906 """
1920 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1907 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1921 prefix=prefix, eolmode=eolmode)
1908 prefix=prefix, eolmode=eolmode)
1922
1909
1923 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1910 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1924 eolmode='strict'):
1911 eolmode='strict'):
1925
1912
1926 if prefix:
1913 if prefix:
1927 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1914 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1928 prefix)
1915 prefix)
1929 if prefix != '':
1916 if prefix != '':
1930 prefix += '/'
1917 prefix += '/'
1931 def pstrip(p):
1918 def pstrip(p):
1932 return pathtransform(p, strip - 1, prefix)[1]
1919 return pathtransform(p, strip - 1, prefix)[1]
1933
1920
1934 rejects = 0
1921 rejects = 0
1935 err = 0
1922 err = 0
1936 current_file = None
1923 current_file = None
1937
1924
1938 for state, values in iterhunks(fp):
1925 for state, values in iterhunks(fp):
1939 if state == 'hunk':
1926 if state == 'hunk':
1940 if not current_file:
1927 if not current_file:
1941 continue
1928 continue
1942 ret = current_file.apply(values)
1929 ret = current_file.apply(values)
1943 if ret > 0:
1930 if ret > 0:
1944 err = 1
1931 err = 1
1945 elif state == 'file':
1932 elif state == 'file':
1946 if current_file:
1933 if current_file:
1947 rejects += current_file.close()
1934 rejects += current_file.close()
1948 current_file = None
1935 current_file = None
1949 afile, bfile, first_hunk, gp = values
1936 afile, bfile, first_hunk, gp = values
1950 if gp:
1937 if gp:
1951 gp.path = pstrip(gp.path)
1938 gp.path = pstrip(gp.path)
1952 if gp.oldpath:
1939 if gp.oldpath:
1953 gp.oldpath = pstrip(gp.oldpath)
1940 gp.oldpath = pstrip(gp.oldpath)
1954 else:
1941 else:
1955 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1942 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1956 prefix)
1943 prefix)
1957 if gp.op == 'RENAME':
1944 if gp.op == 'RENAME':
1958 backend.unlink(gp.oldpath)
1945 backend.unlink(gp.oldpath)
1959 if not first_hunk:
1946 if not first_hunk:
1960 if gp.op == 'DELETE':
1947 if gp.op == 'DELETE':
1961 backend.unlink(gp.path)
1948 backend.unlink(gp.path)
1962 continue
1949 continue
1963 data, mode = None, None
1950 data, mode = None, None
1964 if gp.op in ('RENAME', 'COPY'):
1951 if gp.op in ('RENAME', 'COPY'):
1965 data, mode = store.getfile(gp.oldpath)[:2]
1952 data, mode = store.getfile(gp.oldpath)[:2]
1966 # FIXME: failing getfile has never been handled here
1953 # FIXME: failing getfile has never been handled here
1967 assert data is not None
1954 assert data is not None
1968 if gp.mode:
1955 if gp.mode:
1969 mode = gp.mode
1956 mode = gp.mode
1970 if gp.op == 'ADD':
1957 if gp.op == 'ADD':
1971 # Added files without content have no hunk and
1958 # Added files without content have no hunk and
1972 # must be created
1959 # must be created
1973 data = ''
1960 data = ''
1974 if data or mode:
1961 if data or mode:
1975 if (gp.op in ('ADD', 'RENAME', 'COPY')
1962 if (gp.op in ('ADD', 'RENAME', 'COPY')
1976 and backend.exists(gp.path)):
1963 and backend.exists(gp.path)):
1977 raise PatchError(_("cannot create %s: destination "
1964 raise PatchError(_("cannot create %s: destination "
1978 "already exists") % gp.path)
1965 "already exists") % gp.path)
1979 backend.setfile(gp.path, data, mode, gp.oldpath)
1966 backend.setfile(gp.path, data, mode, gp.oldpath)
1980 continue
1967 continue
1981 try:
1968 try:
1982 current_file = patcher(ui, gp, backend, store,
1969 current_file = patcher(ui, gp, backend, store,
1983 eolmode=eolmode)
1970 eolmode=eolmode)
1984 except PatchError as inst:
1971 except PatchError as inst:
1985 ui.warn(str(inst) + '\n')
1972 ui.warn(str(inst) + '\n')
1986 current_file = None
1973 current_file = None
1987 rejects += 1
1974 rejects += 1
1988 continue
1975 continue
1989 elif state == 'git':
1976 elif state == 'git':
1990 for gp in values:
1977 for gp in values:
1991 path = pstrip(gp.oldpath)
1978 path = pstrip(gp.oldpath)
1992 data, mode = backend.getfile(path)
1979 data, mode = backend.getfile(path)
1993 if data is None:
1980 if data is None:
1994 # The error ignored here will trigger a getfile()
1981 # The error ignored here will trigger a getfile()
1995 # error in a place more appropriate for error
1982 # error in a place more appropriate for error
1996 # handling, and will not interrupt the patching
1983 # handling, and will not interrupt the patching
1997 # process.
1984 # process.
1998 pass
1985 pass
1999 else:
1986 else:
2000 store.setfile(path, data, mode)
1987 store.setfile(path, data, mode)
2001 else:
1988 else:
2002 raise error.Abort(_('unsupported parser state: %s') % state)
1989 raise error.Abort(_('unsupported parser state: %s') % state)
2003
1990
2004 if current_file:
1991 if current_file:
2005 rejects += current_file.close()
1992 rejects += current_file.close()
2006
1993
2007 if rejects:
1994 if rejects:
2008 return -1
1995 return -1
2009 return err
1996 return err
2010
1997
2011 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1998 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2012 similarity):
1999 similarity):
2013 """use <patcher> to apply <patchname> to the working directory.
2000 """use <patcher> to apply <patchname> to the working directory.
2014 returns whether patch was applied with fuzz factor."""
2001 returns whether patch was applied with fuzz factor."""
2015
2002
2016 fuzz = False
2003 fuzz = False
2017 args = []
2004 args = []
2018 cwd = repo.root
2005 cwd = repo.root
2019 if cwd:
2006 if cwd:
2020 args.append('-d %s' % util.shellquote(cwd))
2007 args.append('-d %s' % util.shellquote(cwd))
2021 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2008 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2022 util.shellquote(patchname)))
2009 util.shellquote(patchname)))
2023 try:
2010 try:
2024 for line in fp:
2011 for line in fp:
2025 line = line.rstrip()
2012 line = line.rstrip()
2026 ui.note(line + '\n')
2013 ui.note(line + '\n')
2027 if line.startswith('patching file '):
2014 if line.startswith('patching file '):
2028 pf = util.parsepatchoutput(line)
2015 pf = util.parsepatchoutput(line)
2029 printed_file = False
2016 printed_file = False
2030 files.add(pf)
2017 files.add(pf)
2031 elif line.find('with fuzz') >= 0:
2018 elif line.find('with fuzz') >= 0:
2032 fuzz = True
2019 fuzz = True
2033 if not printed_file:
2020 if not printed_file:
2034 ui.warn(pf + '\n')
2021 ui.warn(pf + '\n')
2035 printed_file = True
2022 printed_file = True
2036 ui.warn(line + '\n')
2023 ui.warn(line + '\n')
2037 elif line.find('saving rejects to file') >= 0:
2024 elif line.find('saving rejects to file') >= 0:
2038 ui.warn(line + '\n')
2025 ui.warn(line + '\n')
2039 elif line.find('FAILED') >= 0:
2026 elif line.find('FAILED') >= 0:
2040 if not printed_file:
2027 if not printed_file:
2041 ui.warn(pf + '\n')
2028 ui.warn(pf + '\n')
2042 printed_file = True
2029 printed_file = True
2043 ui.warn(line + '\n')
2030 ui.warn(line + '\n')
2044 finally:
2031 finally:
2045 if files:
2032 if files:
2046 scmutil.marktouched(repo, files, similarity)
2033 scmutil.marktouched(repo, files, similarity)
2047 code = fp.close()
2034 code = fp.close()
2048 if code:
2035 if code:
2049 raise PatchError(_("patch command failed: %s") %
2036 raise PatchError(_("patch command failed: %s") %
2050 util.explainexit(code)[0])
2037 util.explainexit(code)[0])
2051 return fuzz
2038 return fuzz
2052
2039
2053 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2040 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2054 eolmode='strict'):
2041 eolmode='strict'):
2055 if files is None:
2042 if files is None:
2056 files = set()
2043 files = set()
2057 if eolmode is None:
2044 if eolmode is None:
2058 eolmode = ui.config('patch', 'eol', 'strict')
2045 eolmode = ui.config('patch', 'eol', 'strict')
2059 if eolmode.lower() not in eolmodes:
2046 if eolmode.lower() not in eolmodes:
2060 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2047 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2061 eolmode = eolmode.lower()
2048 eolmode = eolmode.lower()
2062
2049
2063 store = filestore()
2050 store = filestore()
2064 try:
2051 try:
2065 fp = open(patchobj, 'rb')
2052 fp = open(patchobj, 'rb')
2066 except TypeError:
2053 except TypeError:
2067 fp = patchobj
2054 fp = patchobj
2068 try:
2055 try:
2069 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2056 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2070 eolmode=eolmode)
2057 eolmode=eolmode)
2071 finally:
2058 finally:
2072 if fp != patchobj:
2059 if fp != patchobj:
2073 fp.close()
2060 fp.close()
2074 files.update(backend.close())
2061 files.update(backend.close())
2075 store.close()
2062 store.close()
2076 if ret < 0:
2063 if ret < 0:
2077 raise PatchError(_('patch failed to apply'))
2064 raise PatchError(_('patch failed to apply'))
2078 return ret > 0
2065 return ret > 0
2079
2066
2080 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2067 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2081 eolmode='strict', similarity=0):
2068 eolmode='strict', similarity=0):
2082 """use builtin patch to apply <patchobj> to the working directory.
2069 """use builtin patch to apply <patchobj> to the working directory.
2083 returns whether patch was applied with fuzz factor."""
2070 returns whether patch was applied with fuzz factor."""
2084 backend = workingbackend(ui, repo, similarity)
2071 backend = workingbackend(ui, repo, similarity)
2085 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2072 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2086
2073
2087 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2074 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2088 eolmode='strict'):
2075 eolmode='strict'):
2089 backend = repobackend(ui, repo, ctx, store)
2076 backend = repobackend(ui, repo, ctx, store)
2090 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2091
2078
2092 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2079 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2093 similarity=0):
2080 similarity=0):
2094 """Apply <patchname> to the working directory.
2081 """Apply <patchname> to the working directory.
2095
2082
2096 'eolmode' specifies how end of lines should be handled. It can be:
2083 'eolmode' specifies how end of lines should be handled. It can be:
2097 - 'strict': inputs are read in binary mode, EOLs are preserved
2084 - 'strict': inputs are read in binary mode, EOLs are preserved
2098 - 'crlf': EOLs are ignored when patching and reset to CRLF
2085 - 'crlf': EOLs are ignored when patching and reset to CRLF
2099 - 'lf': EOLs are ignored when patching and reset to LF
2086 - 'lf': EOLs are ignored when patching and reset to LF
2100 - None: get it from user settings, default to 'strict'
2087 - None: get it from user settings, default to 'strict'
2101 'eolmode' is ignored when using an external patcher program.
2088 'eolmode' is ignored when using an external patcher program.
2102
2089
2103 Returns whether patch was applied with fuzz factor.
2090 Returns whether patch was applied with fuzz factor.
2104 """
2091 """
2105 patcher = ui.config('ui', 'patch')
2092 patcher = ui.config('ui', 'patch')
2106 if files is None:
2093 if files is None:
2107 files = set()
2094 files = set()
2108 if patcher:
2095 if patcher:
2109 return _externalpatch(ui, repo, patcher, patchname, strip,
2096 return _externalpatch(ui, repo, patcher, patchname, strip,
2110 files, similarity)
2097 files, similarity)
2111 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2098 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2112 similarity)
2099 similarity)
2113
2100
2114 def changedfiles(ui, repo, patchpath, strip=1):
2101 def changedfiles(ui, repo, patchpath, strip=1):
2115 backend = fsbackend(ui, repo.root)
2102 backend = fsbackend(ui, repo.root)
2116 with open(patchpath, 'rb') as fp:
2103 with open(patchpath, 'rb') as fp:
2117 changed = set()
2104 changed = set()
2118 for state, values in iterhunks(fp):
2105 for state, values in iterhunks(fp):
2119 if state == 'file':
2106 if state == 'file':
2120 afile, bfile, first_hunk, gp = values
2107 afile, bfile, first_hunk, gp = values
2121 if gp:
2108 if gp:
2122 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2109 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2123 if gp.oldpath:
2110 if gp.oldpath:
2124 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2111 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2125 else:
2112 else:
2126 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2113 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2127 '')
2114 '')
2128 changed.add(gp.path)
2115 changed.add(gp.path)
2129 if gp.op == 'RENAME':
2116 if gp.op == 'RENAME':
2130 changed.add(gp.oldpath)
2117 changed.add(gp.oldpath)
2131 elif state not in ('hunk', 'git'):
2118 elif state not in ('hunk', 'git'):
2132 raise error.Abort(_('unsupported parser state: %s') % state)
2119 raise error.Abort(_('unsupported parser state: %s') % state)
2133 return changed
2120 return changed
2134
2121
2135 class GitDiffRequired(Exception):
2122 class GitDiffRequired(Exception):
2136 pass
2123 pass
2137
2124
2138 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2125 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2139 '''return diffopts with all features supported and parsed'''
2126 '''return diffopts with all features supported and parsed'''
2140 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2127 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2141 git=True, whitespace=True, formatchanging=True)
2128 git=True, whitespace=True, formatchanging=True)
2142
2129
2143 diffopts = diffallopts
2130 diffopts = diffallopts
2144
2131
2145 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2132 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2146 whitespace=False, formatchanging=False):
2133 whitespace=False, formatchanging=False):
2147 '''return diffopts with only opted-in features parsed
2134 '''return diffopts with only opted-in features parsed
2148
2135
2149 Features:
2136 Features:
2150 - git: git-style diffs
2137 - git: git-style diffs
2151 - whitespace: whitespace options like ignoreblanklines and ignorews
2138 - whitespace: whitespace options like ignoreblanklines and ignorews
2152 - formatchanging: options that will likely break or cause correctness issues
2139 - formatchanging: options that will likely break or cause correctness issues
2153 with most diff parsers
2140 with most diff parsers
2154 '''
2141 '''
2155 def get(key, name=None, getter=ui.configbool, forceplain=None):
2142 def get(key, name=None, getter=ui.configbool, forceplain=None):
2156 if opts:
2143 if opts:
2157 v = opts.get(key)
2144 v = opts.get(key)
2158 if v:
2145 if v:
2159 return v
2146 return v
2160 if forceplain is not None and ui.plain():
2147 if forceplain is not None and ui.plain():
2161 return forceplain
2148 return forceplain
2162 return getter(section, name or key, None, untrusted=untrusted)
2149 return getter(section, name or key, None, untrusted=untrusted)
2163
2150
2164 # core options, expected to be understood by every diff parser
2151 # core options, expected to be understood by every diff parser
2165 buildopts = {
2152 buildopts = {
2166 'nodates': get('nodates'),
2153 'nodates': get('nodates'),
2167 'showfunc': get('show_function', 'showfunc'),
2154 'showfunc': get('show_function', 'showfunc'),
2168 'context': get('unified', getter=ui.config),
2155 'context': get('unified', getter=ui.config),
2169 }
2156 }
2170
2157
2171 if git:
2158 if git:
2172 buildopts['git'] = get('git')
2159 buildopts['git'] = get('git')
2173 if whitespace:
2160 if whitespace:
2174 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2161 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2175 buildopts['ignorewsamount'] = get('ignore_space_change',
2162 buildopts['ignorewsamount'] = get('ignore_space_change',
2176 'ignorewsamount')
2163 'ignorewsamount')
2177 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2164 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2178 'ignoreblanklines')
2165 'ignoreblanklines')
2179 if formatchanging:
2166 if formatchanging:
2180 buildopts['text'] = opts and opts.get('text')
2167 buildopts['text'] = opts and opts.get('text')
2181 buildopts['nobinary'] = get('nobinary', forceplain=False)
2168 buildopts['nobinary'] = get('nobinary', forceplain=False)
2182 buildopts['noprefix'] = get('noprefix', forceplain=False)
2169 buildopts['noprefix'] = get('noprefix', forceplain=False)
2183
2170
2184 return mdiff.diffopts(**buildopts)
2171 return mdiff.diffopts(**buildopts)
2185
2172
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default to diffing against the first working-directory parent
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small (20-entry) LRU cache of filelogs keyed by filename, so
        # repeated lookups of the same file avoid reopening its filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    # only the first three status fields are relevant to a diff
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # losedatafn returning falsy means "upgrade to git format"
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # retry in git format, which can represent the lossy cases
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2314
2301
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefixes that identify file-header lines, with the ui label each maps to
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # prefixes that identify hunk-body lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # head is True while we are inside a file header (between a non-hunk
    # line and the next '@@' hunk marker)
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline that split('\n') removed
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                # any line not starting with a hunk-body character opens
                # a new file header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split changed lines on tab runs so tabs can be
                        # labeled separately
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                # the stripped trailing whitespace, labeled for highlighting
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2366
2353
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    labeled = difflabel(diff, *args, **kw)
    return labeled
2370
2357
2371 def _filepairs(modified, added, removed, copy, opts):
2358 def _filepairs(modified, added, removed, copy, opts):
2372 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2359 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2373 before and f2 is the the name after. For added files, f1 will be None,
2360 before and f2 is the the name after. For added files, f1 will be None,
2374 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2361 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2375 or 'rename' (the latter two only if opts.git is set).'''
2362 or 'rename' (the latter two only if opts.git is set).'''
2376 gone = set()
2363 gone = set()
2377
2364
2378 copyto = dict([(v, k) for k, v in copy.items()])
2365 copyto = dict([(v, k) for k, v in copy.items()])
2379
2366
2380 addedset, removedset = set(added), set(removed)
2367 addedset, removedset = set(added), set(removed)
2381
2368
2382 for f in sorted(modified + added + removed):
2369 for f in sorted(modified + added + removed):
2383 copyop = None
2370 copyop = None
2384 f1, f2 = f, f
2371 f1, f2 = f, f
2385 if f in addedset:
2372 if f in addedset:
2386 f1 = None
2373 f1 = None
2387 if f in copy:
2374 if f in copy:
2388 if opts.git:
2375 if opts.git:
2389 f1 = copy[f]
2376 f1 = copy[f]
2390 if f1 in removedset and f1 not in gone:
2377 if f1 in removedset and f1 not in gone:
2391 copyop = 'rename'
2378 copyop = 'rename'
2392 gone.add(f1)
2379 gone.add(f1)
2393 else:
2380 else:
2394 copyop = 'copy'
2381 copyop = 'copy'
2395 elif f in removedset:
2382 elif f in removedset:
2396 f2 = None
2383 f2 = None
2397 if opts.git:
2384 if opts.git:
2398 # have we already reported a copy above?
2385 # have we already reported a copy above?
2399 if (f in copyto and copyto[f] in addedset
2386 if (f in copyto and copyto[f] in addedset
2400 and copy[copyto[f]] == f):
2387 and copy[copyto[f]] == f):
2401 continue
2388 continue
2402 yield f1, f2, copyop
2389 yield f1, f2, copyop
2403
2390
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-style blob id: sha1 over a "blob <len>\0" header plus content
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) "diff -r REV [-r REV] file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # file flag -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: every path must live under relroot
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            # report every change the plain patch format cannot express
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        # strip relroot and prepend the display prefix
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # suppress a bare one-line header with no hunk text (nothing changed)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2513
2500
def diffstatsum(stats):
    '''fold (filename, adds, removes, isbinary) tuples into overall totals:
    (maxfilewidth, maxtotal, addtotal, removetotal, anybinary)'''
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        # widest display width of any filename, for column alignment
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2524
2511
def diffstatdata(lines):
    '''parse diff output lines into a list of per-file tuples
    (filename, adds, removes, isbinary)'''
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname = None
    adds = 0
    removes = 0
    isbinary = False

    def flush():
        # record the stats gathered for the current file, if any
        if fname:
            results.append((fname, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            flush()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                fname = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch')
              or line.startswith('Binary file')):
            isbinary = True
    flush()
    return results
2554
2541
def diffstat(lines, width=80, git=False):
    '''render diff output lines as a diffstat summary table, one row per
    file plus a totals line, fitted to the given display width'''
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the literal 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2592
2579
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # graph row: "<name...> | <count> ++--"; label the +/- runs
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(r'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(r'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        else:
            # totals line or empty line: no labeling
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now