diffhelpers: move out of pure package
Yuya Nishihara
r37589:0ea8b957 default
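The change below touches two files: mercurial/pure/diffhelpers.py becomes a top-level module (its header comment no longer describes it as the pure-Python counterpart of diffhelpers.c), and patch.py imports it from the main package instead of from the pure subpackage. As a minimal sketch of what that means for callers (module paths as implied by the diff and the commit message):

# before this changeset
from mercurial.pure import diffhelpers
# after this changeset
from mercurial import diffhelpers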
@@ -1,71 +1,71 @@
1 # diffhelpers.py - pure Python implementation of diffhelpers.c
1 # diffhelpers.py - helper routines for patch
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 def addlines(fp, hunk, lena, lenb, a, b):
10 def addlines(fp, hunk, lena, lenb, a, b):
11 """Read lines from fp into the hunk
11 """Read lines from fp into the hunk
12
12
13 The hunk is parsed into two arrays, a and b. a gets the old state of
13 The hunk is parsed into two arrays, a and b. a gets the old state of
14 the text, b gets the new state. The control char from the hunk is saved
14 the text, b gets the new state. The control char from the hunk is saved
15 when inserting into a, but not b (for performance while deleting files.)
15 when inserting into a, but not b (for performance while deleting files.)
16 """
16 """
17 while True:
17 while True:
18 todoa = lena - len(a)
18 todoa = lena - len(a)
19 todob = lenb - len(b)
19 todob = lenb - len(b)
20 num = max(todoa, todob)
20 num = max(todoa, todob)
21 if num == 0:
21 if num == 0:
22 break
22 break
23 for i in xrange(num):
23 for i in xrange(num):
24 s = fp.readline()
24 s = fp.readline()
25 if s == "\\ No newline at end of file\n":
25 if s == "\\ No newline at end of file\n":
26 fixnewline(hunk, a, b)
26 fixnewline(hunk, a, b)
27 continue
27 continue
28 if s == "\n":
28 if s == "\n":
29 # Some patches may be missing the control char
29 # Some patches may be missing the control char
30 # on empty lines. Supply a leading space.
30 # on empty lines. Supply a leading space.
31 s = " \n"
31 s = " \n"
32 hunk.append(s)
32 hunk.append(s)
33 if s.startswith('+'):
33 if s.startswith('+'):
34 b.append(s[1:])
34 b.append(s[1:])
35 elif s.startswith('-'):
35 elif s.startswith('-'):
36 a.append(s)
36 a.append(s)
37 else:
37 else:
38 b.append(s[1:])
38 b.append(s[1:])
39 a.append(s)
39 a.append(s)
40 return 0
40 return 0
41
41
42 def fixnewline(hunk, a, b):
42 def fixnewline(hunk, a, b):
43 """Fix up the last lines of a and b when the patch has no newline at EOF"""
43 """Fix up the last lines of a and b when the patch has no newline at EOF"""
44 l = hunk[-1]
44 l = hunk[-1]
45 # tolerate CRLF in last line
45 # tolerate CRLF in last line
46 if l.endswith('\r\n'):
46 if l.endswith('\r\n'):
47 hline = l[:-2]
47 hline = l[:-2]
48 else:
48 else:
49 hline = l[:-1]
49 hline = l[:-1]
50
50
51 if hline.startswith((' ', '+')):
51 if hline.startswith((' ', '+')):
52 b[-1] = hline[1:]
52 b[-1] = hline[1:]
53 if hline.startswith((' ', '-')):
53 if hline.startswith((' ', '-')):
54 a[-1] = hline
54 a[-1] = hline
55 hunk[-1] = hline
55 hunk[-1] = hline
56 return 0
56 return 0
57
57
58 def testhunk(a, b, bstart):
58 def testhunk(a, b, bstart):
59 """Compare the lines in a with the lines in b
59 """Compare the lines in a with the lines in b
60
60
61 a is assumed to have a control char at the start of each line, this char
61 a is assumed to have a control char at the start of each line, this char
62 is ignored in the compare.
62 is ignored in the compare.
63 """
63 """
64 alen = len(a)
64 alen = len(a)
65 blen = len(b)
65 blen = len(b)
66 if alen > blen - bstart:
66 if alen > blen - bstart:
67 return -1
67 return -1
68 for i in xrange(alen):
68 for i in xrange(alen):
69 if a[i][1:] != b[i + bstart]:
69 if a[i][1:] != b[i + bstart]:
70 return -1
70 return -1
71 return 0
71 return 0
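To make the relationship between the three helpers concrete, here is a small usage sketch. It is not part of the changeset; it assumes Python 2 and that the module is importable as mercurial.diffhelpers, which is exactly what the import change in patch.py below relies on.

from mercurial import diffhelpers
import cStringIO

# a hunk body with one removed, one added and one context line
fp = cStringIO.StringIO("-old line\n+new line\n context\n")
hunk, a, b = ["@@ -1,2 +1,2 @@\n"], [], []
diffhelpers.addlines(fp, hunk, 2, 2, a, b)
# a keeps the control character, b does not
assert a == ["-old line\n", " context\n"]
assert b == ["new line\n", "context\n"]
# testhunk() checks that the old lines still match the target file
target = ["old line\n", "context\n"]
assert diffhelpers.testhunk(a, target, 0) == 0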
@@ -1,2913 +1,2913 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import difflib
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import tempfile
21 import tempfile
22 import zlib
22 import zlib
23
23
24 from .i18n import _
24 from .i18n import _
25 from .node import (
25 from .node import (
26 hex,
26 hex,
27 short,
27 short,
28 )
28 )
29 from . import (
29 from . import (
30 copies,
30 copies,
31 diffhelpers,
31 encoding,
32 encoding,
32 error,
33 error,
33 mail,
34 mail,
34 mdiff,
35 mdiff,
35 pathutil,
36 pathutil,
36 pycompat,
37 pycompat,
37 scmutil,
38 scmutil,
38 similar,
39 similar,
39 util,
40 util,
40 vfs as vfsmod,
41 vfs as vfsmod,
41 )
42 )
42 from .pure import diffhelpers
43 from .utils import (
43 from .utils import (
44 dateutil,
44 dateutil,
45 procutil,
45 procutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 stringio = util.stringio
49 stringio = util.stringio
50
50
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
53 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
54
54
55 PatchError = error.PatchError
55 PatchError = error.PatchError
56
56
57 # public functions
57 # public functions
58
58
59 def split(stream):
59 def split(stream):
60 '''return an iterator of individual patches from a stream'''
60 '''return an iterator of individual patches from a stream'''
61 def isheader(line, inheader):
61 def isheader(line, inheader):
62 if inheader and line.startswith((' ', '\t')):
62 if inheader and line.startswith((' ', '\t')):
63 # continuation
63 # continuation
64 return True
64 return True
65 if line.startswith((' ', '-', '+')):
65 if line.startswith((' ', '-', '+')):
66 # diff line - don't check for header pattern in there
66 # diff line - don't check for header pattern in there
67 return False
67 return False
68 l = line.split(': ', 1)
68 l = line.split(': ', 1)
69 return len(l) == 2 and ' ' not in l[0]
69 return len(l) == 2 and ' ' not in l[0]
70
70
71 def chunk(lines):
71 def chunk(lines):
72 return stringio(''.join(lines))
72 return stringio(''.join(lines))
73
73
74 def hgsplit(stream, cur):
74 def hgsplit(stream, cur):
75 inheader = True
75 inheader = True
76
76
77 for line in stream:
77 for line in stream:
78 if not line.strip():
78 if not line.strip():
79 inheader = False
79 inheader = False
80 if not inheader and line.startswith('# HG changeset patch'):
80 if not inheader and line.startswith('# HG changeset patch'):
81 yield chunk(cur)
81 yield chunk(cur)
82 cur = []
82 cur = []
83 inheader = True
83 inheader = True
84
84
85 cur.append(line)
85 cur.append(line)
86
86
87 if cur:
87 if cur:
88 yield chunk(cur)
88 yield chunk(cur)
89
89
90 def mboxsplit(stream, cur):
90 def mboxsplit(stream, cur):
91 for line in stream:
91 for line in stream:
92 if line.startswith('From '):
92 if line.startswith('From '):
93 for c in split(chunk(cur[1:])):
93 for c in split(chunk(cur[1:])):
94 yield c
94 yield c
95 cur = []
95 cur = []
96
96
97 cur.append(line)
97 cur.append(line)
98
98
99 if cur:
99 if cur:
100 for c in split(chunk(cur[1:])):
100 for c in split(chunk(cur[1:])):
101 yield c
101 yield c
102
102
103 def mimesplit(stream, cur):
103 def mimesplit(stream, cur):
104 def msgfp(m):
104 def msgfp(m):
105 fp = stringio()
105 fp = stringio()
106 g = email.Generator.Generator(fp, mangle_from_=False)
106 g = email.Generator.Generator(fp, mangle_from_=False)
107 g.flatten(m)
107 g.flatten(m)
108 fp.seek(0)
108 fp.seek(0)
109 return fp
109 return fp
110
110
111 for line in stream:
111 for line in stream:
112 cur.append(line)
112 cur.append(line)
113 c = chunk(cur)
113 c = chunk(cur)
114
114
115 m = pycompat.emailparser().parse(c)
115 m = pycompat.emailparser().parse(c)
116 if not m.is_multipart():
116 if not m.is_multipart():
117 yield msgfp(m)
117 yield msgfp(m)
118 else:
118 else:
119 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
119 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
120 for part in m.walk():
120 for part in m.walk():
121 ct = part.get_content_type()
121 ct = part.get_content_type()
122 if ct not in ok_types:
122 if ct not in ok_types:
123 continue
123 continue
124 yield msgfp(part)
124 yield msgfp(part)
125
125
126 def headersplit(stream, cur):
126 def headersplit(stream, cur):
127 inheader = False
127 inheader = False
128
128
129 for line in stream:
129 for line in stream:
130 if not inheader and isheader(line, inheader):
130 if not inheader and isheader(line, inheader):
131 yield chunk(cur)
131 yield chunk(cur)
132 cur = []
132 cur = []
133 inheader = True
133 inheader = True
134 if inheader and not isheader(line, inheader):
134 if inheader and not isheader(line, inheader):
135 inheader = False
135 inheader = False
136
136
137 cur.append(line)
137 cur.append(line)
138
138
139 if cur:
139 if cur:
140 yield chunk(cur)
140 yield chunk(cur)
141
141
142 def remainder(cur):
142 def remainder(cur):
143 yield chunk(cur)
143 yield chunk(cur)
144
144
145 class fiter(object):
145 class fiter(object):
146 def __init__(self, fp):
146 def __init__(self, fp):
147 self.fp = fp
147 self.fp = fp
148
148
149 def __iter__(self):
149 def __iter__(self):
150 return self
150 return self
151
151
152 def next(self):
152 def next(self):
153 l = self.fp.readline()
153 l = self.fp.readline()
154 if not l:
154 if not l:
155 raise StopIteration
155 raise StopIteration
156 return l
156 return l
157
157
158 __next__ = next
158 __next__ = next
159
159
160 inheader = False
160 inheader = False
161 cur = []
161 cur = []
162
162
163 mimeheaders = ['content-type']
163 mimeheaders = ['content-type']
164
164
165 if not util.safehasattr(stream, 'next'):
165 if not util.safehasattr(stream, 'next'):
166 # http responses, for example, have readline but not next
166 # http responses, for example, have readline but not next
167 stream = fiter(stream)
167 stream = fiter(stream)
168
168
169 for line in stream:
169 for line in stream:
170 cur.append(line)
170 cur.append(line)
171 if line.startswith('# HG changeset patch'):
171 if line.startswith('# HG changeset patch'):
172 return hgsplit(stream, cur)
172 return hgsplit(stream, cur)
173 elif line.startswith('From '):
173 elif line.startswith('From '):
174 return mboxsplit(stream, cur)
174 return mboxsplit(stream, cur)
175 elif isheader(line, inheader):
175 elif isheader(line, inheader):
176 inheader = True
176 inheader = True
177 if line.split(':', 1)[0].lower() in mimeheaders:
177 if line.split(':', 1)[0].lower() in mimeheaders:
178 # let email parser handle this
178 # let email parser handle this
179 return mimesplit(stream, cur)
179 return mimesplit(stream, cur)
180 elif line.startswith('--- ') and inheader:
180 elif line.startswith('--- ') and inheader:
181 # No evil headers seen by diff start, split by hand
181 # No evil headers seen by diff start, split by hand
182 return headersplit(stream, cur)
182 return headersplit(stream, cur)
183 # Not enough info, keep reading
183 # Not enough info, keep reading
184
184
185 # if we are here, we have a very plain patch
185 # if we are here, we have a very plain patch
186 return remainder(cur)
186 return remainder(cur)
187
187
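A short, hypothetical illustration of split(), assuming Python 2 and `from mercurial import patch, util` (the changeset contents are made up): two exported changesets concatenated in one stream come back as two separate chunks.

data = util.stringio('# HG changeset patch\n'
                     '# User alice\n'
                     '\n'
                     'diff -r 000 -r 111 a\n'
                     '# HG changeset patch\n'
                     '# User bob\n'
                     '\n'
                     'diff -r 111 -r 222 b\n')
chunks = list(patch.split(data))
assert len(chunks) == 2
assert chunks[1].read().startswith('# HG changeset patch\n# User bob')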
188 ## Some facility for extensible patch parsing:
188 ## Some facility for extensible patch parsing:
189 # list of pairs ("header to match", "data key")
189 # list of pairs ("header to match", "data key")
190 patchheadermap = [('Date', 'date'),
190 patchheadermap = [('Date', 'date'),
191 ('Branch', 'branch'),
191 ('Branch', 'branch'),
192 ('Node ID', 'nodeid'),
192 ('Node ID', 'nodeid'),
193 ]
193 ]
194
194
195 def extract(ui, fileobj):
195 def extract(ui, fileobj):
196 '''extract patch from data read from fileobj.
196 '''extract patch from data read from fileobj.
197
197
198 patch can be a normal patch or contained in an email message.
198 patch can be a normal patch or contained in an email message.
199
199
200 return a dictionary. Standard keys are:
200 return a dictionary. Standard keys are:
201 - filename,
201 - filename,
202 - message,
202 - message,
203 - user,
203 - user,
204 - date,
204 - date,
205 - branch,
205 - branch,
206 - node,
206 - node,
207 - p1,
207 - p1,
208 - p2.
208 - p2.
209 Any item can be missing from the dictionary. If filename is missing,
209 Any item can be missing from the dictionary. If filename is missing,
210 fileobj did not contain a patch. Caller must unlink filename when done.'''
210 fileobj did not contain a patch. Caller must unlink filename when done.'''
211
211
212 # attempt to detect the start of a patch
212 # attempt to detect the start of a patch
213 # (this heuristic is borrowed from quilt)
213 # (this heuristic is borrowed from quilt)
214 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
214 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
215 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
215 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
216 br'---[ \t].*?^\+\+\+[ \t]|'
216 br'---[ \t].*?^\+\+\+[ \t]|'
217 br'\*\*\*[ \t].*?^---[ \t])',
217 br'\*\*\*[ \t].*?^---[ \t])',
218 re.MULTILINE | re.DOTALL)
218 re.MULTILINE | re.DOTALL)
219
219
220 data = {}
220 data = {}
221 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
221 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
222 tmpfp = os.fdopen(fd, r'wb')
222 tmpfp = os.fdopen(fd, r'wb')
223 try:
223 try:
224 msg = pycompat.emailparser().parse(fileobj)
224 msg = pycompat.emailparser().parse(fileobj)
225
225
226 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
226 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
227 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
227 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
228 if not subject and not data['user']:
228 if not subject and not data['user']:
229 # Not an email, restore parsed headers if any
229 # Not an email, restore parsed headers if any
230 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
230 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
231 for h in msg.items()) + '\n'
231 for h in msg.items()) + '\n'
232
232
233 # should try to parse msg['Date']
233 # should try to parse msg['Date']
234 parents = []
234 parents = []
235
235
236 if subject:
236 if subject:
237 if subject.startswith('[PATCH'):
237 if subject.startswith('[PATCH'):
238 pend = subject.find(']')
238 pend = subject.find(']')
239 if pend >= 0:
239 if pend >= 0:
240 subject = subject[pend + 1:].lstrip()
240 subject = subject[pend + 1:].lstrip()
241 subject = re.sub(br'\n[ \t]+', ' ', subject)
241 subject = re.sub(br'\n[ \t]+', ' ', subject)
242 ui.debug('Subject: %s\n' % subject)
242 ui.debug('Subject: %s\n' % subject)
243 if data['user']:
243 if data['user']:
244 ui.debug('From: %s\n' % data['user'])
244 ui.debug('From: %s\n' % data['user'])
245 diffs_seen = 0
245 diffs_seen = 0
246 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
246 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
247 message = ''
247 message = ''
248 for part in msg.walk():
248 for part in msg.walk():
249 content_type = pycompat.bytestr(part.get_content_type())
249 content_type = pycompat.bytestr(part.get_content_type())
250 ui.debug('Content-Type: %s\n' % content_type)
250 ui.debug('Content-Type: %s\n' % content_type)
251 if content_type not in ok_types:
251 if content_type not in ok_types:
252 continue
252 continue
253 payload = part.get_payload(decode=True)
253 payload = part.get_payload(decode=True)
254 m = diffre.search(payload)
254 m = diffre.search(payload)
255 if m:
255 if m:
256 hgpatch = False
256 hgpatch = False
257 hgpatchheader = False
257 hgpatchheader = False
258 ignoretext = False
258 ignoretext = False
259
259
260 ui.debug('found patch at byte %d\n' % m.start(0))
260 ui.debug('found patch at byte %d\n' % m.start(0))
261 diffs_seen += 1
261 diffs_seen += 1
262 cfp = stringio()
262 cfp = stringio()
263 for line in payload[:m.start(0)].splitlines():
263 for line in payload[:m.start(0)].splitlines():
264 if line.startswith('# HG changeset patch') and not hgpatch:
264 if line.startswith('# HG changeset patch') and not hgpatch:
265 ui.debug('patch generated by hg export\n')
265 ui.debug('patch generated by hg export\n')
266 hgpatch = True
266 hgpatch = True
267 hgpatchheader = True
267 hgpatchheader = True
268 # drop earlier commit message content
268 # drop earlier commit message content
269 cfp.seek(0)
269 cfp.seek(0)
270 cfp.truncate()
270 cfp.truncate()
271 subject = None
271 subject = None
272 elif hgpatchheader:
272 elif hgpatchheader:
273 if line.startswith('# User '):
273 if line.startswith('# User '):
274 data['user'] = line[7:]
274 data['user'] = line[7:]
275 ui.debug('From: %s\n' % data['user'])
275 ui.debug('From: %s\n' % data['user'])
276 elif line.startswith("# Parent "):
276 elif line.startswith("# Parent "):
277 parents.append(line[9:].lstrip())
277 parents.append(line[9:].lstrip())
278 elif line.startswith("# "):
278 elif line.startswith("# "):
279 for header, key in patchheadermap:
279 for header, key in patchheadermap:
280 prefix = '# %s ' % header
280 prefix = '# %s ' % header
281 if line.startswith(prefix):
281 if line.startswith(prefix):
282 data[key] = line[len(prefix):]
282 data[key] = line[len(prefix):]
283 else:
283 else:
284 hgpatchheader = False
284 hgpatchheader = False
285 elif line == '---':
285 elif line == '---':
286 ignoretext = True
286 ignoretext = True
287 if not hgpatchheader and not ignoretext:
287 if not hgpatchheader and not ignoretext:
288 cfp.write(line)
288 cfp.write(line)
289 cfp.write('\n')
289 cfp.write('\n')
290 message = cfp.getvalue()
290 message = cfp.getvalue()
291 if tmpfp:
291 if tmpfp:
292 tmpfp.write(payload)
292 tmpfp.write(payload)
293 if not payload.endswith('\n'):
293 if not payload.endswith('\n'):
294 tmpfp.write('\n')
294 tmpfp.write('\n')
295 elif not diffs_seen and message and content_type == 'text/plain':
295 elif not diffs_seen and message and content_type == 'text/plain':
296 message += '\n' + payload
296 message += '\n' + payload
297 except: # re-raises
297 except: # re-raises
298 tmpfp.close()
298 tmpfp.close()
299 os.unlink(tmpname)
299 os.unlink(tmpname)
300 raise
300 raise
301
301
302 if subject and not message.startswith(subject):
302 if subject and not message.startswith(subject):
303 message = '%s\n%s' % (subject, message)
303 message = '%s\n%s' % (subject, message)
304 data['message'] = message
304 data['message'] = message
305 tmpfp.close()
305 tmpfp.close()
306 if parents:
306 if parents:
307 data['p1'] = parents.pop(0)
307 data['p1'] = parents.pop(0)
308 if parents:
308 if parents:
309 data['p2'] = parents.pop(0)
309 data['p2'] = parents.pop(0)
310
310
311 if diffs_seen:
311 if diffs_seen:
312 data['filename'] = tmpname
312 data['filename'] = tmpname
313 else:
313 else:
314 os.unlink(tmpname)
314 os.unlink(tmpname)
315 return data
315 return data
316
316
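A hedged usage sketch for extract(); the ui object and the patch path are assumptions, not things defined in this file. The returned dictionary may carry message/user/date metadata, and the caller is responsible for unlinking the temporary file named by 'filename'.

import os

fileobj = open('incoming.patch', 'rb')          # hypothetical input file
try:
    data = patch.extract(ui, fileobj)           # 'ui' is assumed to exist
finally:
    fileobj.close()
try:
    commitmsg = data.get('message')             # may be missing entirely
    diffpath = data.get('filename')             # temporary file with the diff
    # ... hand diffpath to a patching routine here ...
finally:
    if data.get('filename'):
        os.unlink(data['filename'])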
317 class patchmeta(object):
317 class patchmeta(object):
318 """Patched file metadata
318 """Patched file metadata
319
319
320 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
320 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
321 or COPY. 'path' is patched file path. 'oldpath' is set to the
321 or COPY. 'path' is patched file path. 'oldpath' is set to the
322 origin file when 'op' is either COPY or RENAME, None otherwise. If
322 origin file when 'op' is either COPY or RENAME, None otherwise. If
323 file mode is changed, 'mode' is a tuple (islink, isexec) where
323 file mode is changed, 'mode' is a tuple (islink, isexec) where
324 'islink' is True if the file is a symlink and 'isexec' is True if
324 'islink' is True if the file is a symlink and 'isexec' is True if
325 the file is executable. Otherwise, 'mode' is None.
325 the file is executable. Otherwise, 'mode' is None.
326 """
326 """
327 def __init__(self, path):
327 def __init__(self, path):
328 self.path = path
328 self.path = path
329 self.oldpath = None
329 self.oldpath = None
330 self.mode = None
330 self.mode = None
331 self.op = 'MODIFY'
331 self.op = 'MODIFY'
332 self.binary = False
332 self.binary = False
333
333
334 def setmode(self, mode):
334 def setmode(self, mode):
335 islink = mode & 0o20000
335 islink = mode & 0o20000
336 isexec = mode & 0o100
336 isexec = mode & 0o100
337 self.mode = (islink, isexec)
337 self.mode = (islink, isexec)
338
338
339 def copy(self):
339 def copy(self):
340 other = patchmeta(self.path)
340 other = patchmeta(self.path)
341 other.oldpath = self.oldpath
341 other.oldpath = self.oldpath
342 other.mode = self.mode
342 other.mode = self.mode
343 other.op = self.op
343 other.op = self.op
344 other.binary = self.binary
344 other.binary = self.binary
345 return other
345 return other
346
346
347 def _ispatchinga(self, afile):
347 def _ispatchinga(self, afile):
348 if afile == '/dev/null':
348 if afile == '/dev/null':
349 return self.op == 'ADD'
349 return self.op == 'ADD'
350 return afile == 'a/' + (self.oldpath or self.path)
350 return afile == 'a/' + (self.oldpath or self.path)
351
351
352 def _ispatchingb(self, bfile):
352 def _ispatchingb(self, bfile):
353 if bfile == '/dev/null':
353 if bfile == '/dev/null':
354 return self.op == 'DELETE'
354 return self.op == 'DELETE'
355 return bfile == 'b/' + self.path
355 return bfile == 'b/' + self.path
356
356
357 def ispatching(self, afile, bfile):
357 def ispatching(self, afile, bfile):
358 return self._ispatchinga(afile) and self._ispatchingb(bfile)
358 return self._ispatchinga(afile) and self._ispatchingb(bfile)
359
359
360 def __repr__(self):
360 def __repr__(self):
361 return "<patchmeta %s %r>" % (self.op, self.path)
361 return "<patchmeta %s %r>" % (self.op, self.path)
362
362
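An illustrative patchmeta for a rename, populated roughly the way readgitpatch() below would fill it in from 'rename from'/'rename to' and mode lines (the file names are made up):

gp = patchmeta('new.txt')
gp.op = 'RENAME'
gp.oldpath = 'old.txt'
gp.setmode(0o100644)                 # regular, non-executable file
assert gp.mode == (0, 0)             # (islink, isexec), both false here
assert gp.ispatching('a/old.txt', 'b/new.txt')
assert not gp.ispatching('a/new.txt', 'b/new.txt')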
363 def readgitpatch(lr):
363 def readgitpatch(lr):
364 """extract git-style metadata about patches from <patchname>"""
364 """extract git-style metadata about patches from <patchname>"""
365
365
366 # Filter patch for git information
366 # Filter patch for git information
367 gp = None
367 gp = None
368 gitpatches = []
368 gitpatches = []
369 for line in lr:
369 for line in lr:
370 line = line.rstrip(' \r\n')
370 line = line.rstrip(' \r\n')
371 if line.startswith('diff --git a/'):
371 if line.startswith('diff --git a/'):
372 m = gitre.match(line)
372 m = gitre.match(line)
373 if m:
373 if m:
374 if gp:
374 if gp:
375 gitpatches.append(gp)
375 gitpatches.append(gp)
376 dst = m.group(2)
376 dst = m.group(2)
377 gp = patchmeta(dst)
377 gp = patchmeta(dst)
378 elif gp:
378 elif gp:
379 if line.startswith('--- '):
379 if line.startswith('--- '):
380 gitpatches.append(gp)
380 gitpatches.append(gp)
381 gp = None
381 gp = None
382 continue
382 continue
383 if line.startswith('rename from '):
383 if line.startswith('rename from '):
384 gp.op = 'RENAME'
384 gp.op = 'RENAME'
385 gp.oldpath = line[12:]
385 gp.oldpath = line[12:]
386 elif line.startswith('rename to '):
386 elif line.startswith('rename to '):
387 gp.path = line[10:]
387 gp.path = line[10:]
388 elif line.startswith('copy from '):
388 elif line.startswith('copy from '):
389 gp.op = 'COPY'
389 gp.op = 'COPY'
390 gp.oldpath = line[10:]
390 gp.oldpath = line[10:]
391 elif line.startswith('copy to '):
391 elif line.startswith('copy to '):
392 gp.path = line[8:]
392 gp.path = line[8:]
393 elif line.startswith('deleted file'):
393 elif line.startswith('deleted file'):
394 gp.op = 'DELETE'
394 gp.op = 'DELETE'
395 elif line.startswith('new file mode '):
395 elif line.startswith('new file mode '):
396 gp.op = 'ADD'
396 gp.op = 'ADD'
397 gp.setmode(int(line[-6:], 8))
397 gp.setmode(int(line[-6:], 8))
398 elif line.startswith('new mode '):
398 elif line.startswith('new mode '):
399 gp.setmode(int(line[-6:], 8))
399 gp.setmode(int(line[-6:], 8))
400 elif line.startswith('GIT binary patch'):
400 elif line.startswith('GIT binary patch'):
401 gp.binary = True
401 gp.binary = True
402 if gp:
402 if gp:
403 gitpatches.append(gp)
403 gitpatches.append(gp)
404
404
405 return gitpatches
405 return gitpatches
406
406
407 class linereader(object):
407 class linereader(object):
408 # simple class to allow pushing lines back into the input stream
408 # simple class to allow pushing lines back into the input stream
409 def __init__(self, fp):
409 def __init__(self, fp):
410 self.fp = fp
410 self.fp = fp
411 self.buf = []
411 self.buf = []
412
412
413 def push(self, line):
413 def push(self, line):
414 if line is not None:
414 if line is not None:
415 self.buf.append(line)
415 self.buf.append(line)
416
416
417 def readline(self):
417 def readline(self):
418 if self.buf:
418 if self.buf:
419 l = self.buf[0]
419 l = self.buf[0]
420 del self.buf[0]
420 del self.buf[0]
421 return l
421 return l
422 return self.fp.readline()
422 return self.fp.readline()
423
423
424 def __iter__(self):
424 def __iter__(self):
425 return iter(self.readline, '')
425 return iter(self.readline, '')
426
426
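A quick sketch of the push-back behaviour (using util.stringio, as elsewhere in this module): a pushed line is served before anything still waiting in the underlying file object.

lr = linereader(util.stringio('second\nthird\n'))
lr.push('first\n')
assert lr.readline() == 'first\n'
assert lr.readline() == 'second\n'
assert list(lr) == ['third\n']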
427 class abstractbackend(object):
427 class abstractbackend(object):
428 def __init__(self, ui):
428 def __init__(self, ui):
429 self.ui = ui
429 self.ui = ui
430
430
431 def getfile(self, fname):
431 def getfile(self, fname):
432 """Return target file data and flags as a (data, (islink,
432 """Return target file data and flags as a (data, (islink,
433 isexec)) tuple. Data is None if file is missing/deleted.
433 isexec)) tuple. Data is None if file is missing/deleted.
434 """
434 """
435 raise NotImplementedError
435 raise NotImplementedError
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 """Write data to target file fname and set its mode. mode is a
438 """Write data to target file fname and set its mode. mode is a
439 (islink, isexec) tuple. If data is None, the file content should
439 (islink, isexec) tuple. If data is None, the file content should
440 be left unchanged. If the file is modified after being copied,
440 be left unchanged. If the file is modified after being copied,
441 copysource is set to the original file name.
441 copysource is set to the original file name.
442 """
442 """
443 raise NotImplementedError
443 raise NotImplementedError
444
444
445 def unlink(self, fname):
445 def unlink(self, fname):
446 """Unlink target file."""
446 """Unlink target file."""
447 raise NotImplementedError
447 raise NotImplementedError
448
448
449 def writerej(self, fname, failed, total, lines):
449 def writerej(self, fname, failed, total, lines):
450 """Write rejected lines for fname. total is the number of hunks
450 """Write rejected lines for fname. total is the number of hunks
451 which failed to apply and total the total number of hunks for this
451 which failed to apply and total the total number of hunks for this
452 files.
452 files.
453 """
453 """
454
454
455 def exists(self, fname):
455 def exists(self, fname):
456 raise NotImplementedError
456 raise NotImplementedError
457
457
458 def close(self):
458 def close(self):
459 raise NotImplementedError
459 raise NotImplementedError
460
460
461 class fsbackend(abstractbackend):
461 class fsbackend(abstractbackend):
462 def __init__(self, ui, basedir):
462 def __init__(self, ui, basedir):
463 super(fsbackend, self).__init__(ui)
463 super(fsbackend, self).__init__(ui)
464 self.opener = vfsmod.vfs(basedir)
464 self.opener = vfsmod.vfs(basedir)
465
465
466 def getfile(self, fname):
466 def getfile(self, fname):
467 if self.opener.islink(fname):
467 if self.opener.islink(fname):
468 return (self.opener.readlink(fname), (True, False))
468 return (self.opener.readlink(fname), (True, False))
469
469
470 isexec = False
470 isexec = False
471 try:
471 try:
472 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
472 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
473 except OSError as e:
473 except OSError as e:
474 if e.errno != errno.ENOENT:
474 if e.errno != errno.ENOENT:
475 raise
475 raise
476 try:
476 try:
477 return (self.opener.read(fname), (False, isexec))
477 return (self.opener.read(fname), (False, isexec))
478 except IOError as e:
478 except IOError as e:
479 if e.errno != errno.ENOENT:
479 if e.errno != errno.ENOENT:
480 raise
480 raise
481 return None, None
481 return None, None
482
482
483 def setfile(self, fname, data, mode, copysource):
483 def setfile(self, fname, data, mode, copysource):
484 islink, isexec = mode
484 islink, isexec = mode
485 if data is None:
485 if data is None:
486 self.opener.setflags(fname, islink, isexec)
486 self.opener.setflags(fname, islink, isexec)
487 return
487 return
488 if islink:
488 if islink:
489 self.opener.symlink(data, fname)
489 self.opener.symlink(data, fname)
490 else:
490 else:
491 self.opener.write(fname, data)
491 self.opener.write(fname, data)
492 if isexec:
492 if isexec:
493 self.opener.setflags(fname, False, True)
493 self.opener.setflags(fname, False, True)
494
494
495 def unlink(self, fname):
495 def unlink(self, fname):
496 self.opener.unlinkpath(fname, ignoremissing=True)
496 self.opener.unlinkpath(fname, ignoremissing=True)
497
497
498 def writerej(self, fname, failed, total, lines):
498 def writerej(self, fname, failed, total, lines):
499 fname = fname + ".rej"
499 fname = fname + ".rej"
500 self.ui.warn(
500 self.ui.warn(
501 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
501 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
502 (failed, total, fname))
502 (failed, total, fname))
503 fp = self.opener(fname, 'w')
503 fp = self.opener(fname, 'w')
504 fp.writelines(lines)
504 fp.writelines(lines)
505 fp.close()
505 fp.close()
506
506
507 def exists(self, fname):
507 def exists(self, fname):
508 return self.opener.lexists(fname)
508 return self.opener.lexists(fname)
509
509
510 class workingbackend(fsbackend):
510 class workingbackend(fsbackend):
511 def __init__(self, ui, repo, similarity):
511 def __init__(self, ui, repo, similarity):
512 super(workingbackend, self).__init__(ui, repo.root)
512 super(workingbackend, self).__init__(ui, repo.root)
513 self.repo = repo
513 self.repo = repo
514 self.similarity = similarity
514 self.similarity = similarity
515 self.removed = set()
515 self.removed = set()
516 self.changed = set()
516 self.changed = set()
517 self.copied = []
517 self.copied = []
518
518
519 def _checkknown(self, fname):
519 def _checkknown(self, fname):
520 if self.repo.dirstate[fname] == '?' and self.exists(fname):
520 if self.repo.dirstate[fname] == '?' and self.exists(fname):
521 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
521 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
522
522
523 def setfile(self, fname, data, mode, copysource):
523 def setfile(self, fname, data, mode, copysource):
524 self._checkknown(fname)
524 self._checkknown(fname)
525 super(workingbackend, self).setfile(fname, data, mode, copysource)
525 super(workingbackend, self).setfile(fname, data, mode, copysource)
526 if copysource is not None:
526 if copysource is not None:
527 self.copied.append((copysource, fname))
527 self.copied.append((copysource, fname))
528 self.changed.add(fname)
528 self.changed.add(fname)
529
529
530 def unlink(self, fname):
530 def unlink(self, fname):
531 self._checkknown(fname)
531 self._checkknown(fname)
532 super(workingbackend, self).unlink(fname)
532 super(workingbackend, self).unlink(fname)
533 self.removed.add(fname)
533 self.removed.add(fname)
534 self.changed.add(fname)
534 self.changed.add(fname)
535
535
536 def close(self):
536 def close(self):
537 wctx = self.repo[None]
537 wctx = self.repo[None]
538 changed = set(self.changed)
538 changed = set(self.changed)
539 for src, dst in self.copied:
539 for src, dst in self.copied:
540 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
540 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
541 if self.removed:
541 if self.removed:
542 wctx.forget(sorted(self.removed))
542 wctx.forget(sorted(self.removed))
543 for f in self.removed:
543 for f in self.removed:
544 if f not in self.repo.dirstate:
544 if f not in self.repo.dirstate:
545 # File was deleted and no longer belongs to the
545 # File was deleted and no longer belongs to the
546 # dirstate, it was probably marked added then
546 # dirstate, it was probably marked added then
547 # deleted, and should not be considered by
547 # deleted, and should not be considered by
548 # marktouched().
548 # marktouched().
549 changed.discard(f)
549 changed.discard(f)
550 if changed:
550 if changed:
551 scmutil.marktouched(self.repo, changed, self.similarity)
551 scmutil.marktouched(self.repo, changed, self.similarity)
552 return sorted(self.changed)
552 return sorted(self.changed)
553
553
554 class filestore(object):
554 class filestore(object):
555 def __init__(self, maxsize=None):
555 def __init__(self, maxsize=None):
556 self.opener = None
556 self.opener = None
557 self.files = {}
557 self.files = {}
558 self.created = 0
558 self.created = 0
559 self.maxsize = maxsize
559 self.maxsize = maxsize
560 if self.maxsize is None:
560 if self.maxsize is None:
561 self.maxsize = 4*(2**20)
561 self.maxsize = 4*(2**20)
562 self.size = 0
562 self.size = 0
563 self.data = {}
563 self.data = {}
564
564
565 def setfile(self, fname, data, mode, copied=None):
565 def setfile(self, fname, data, mode, copied=None):
566 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
566 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
567 self.data[fname] = (data, mode, copied)
567 self.data[fname] = (data, mode, copied)
568 self.size += len(data)
568 self.size += len(data)
569 else:
569 else:
570 if self.opener is None:
570 if self.opener is None:
571 root = tempfile.mkdtemp(prefix='hg-patch-')
571 root = tempfile.mkdtemp(prefix='hg-patch-')
572 self.opener = vfsmod.vfs(root)
572 self.opener = vfsmod.vfs(root)
573 # Avoid filename issues with these simple names
573 # Avoid filename issues with these simple names
574 fn = '%d' % self.created
574 fn = '%d' % self.created
575 self.opener.write(fn, data)
575 self.opener.write(fn, data)
576 self.created += 1
576 self.created += 1
577 self.files[fname] = (fn, mode, copied)
577 self.files[fname] = (fn, mode, copied)
578
578
579 def getfile(self, fname):
579 def getfile(self, fname):
580 if fname in self.data:
580 if fname in self.data:
581 return self.data[fname]
581 return self.data[fname]
582 if not self.opener or fname not in self.files:
582 if not self.opener or fname not in self.files:
583 return None, None, None
583 return None, None, None
584 fn, mode, copied = self.files[fname]
584 fn, mode, copied = self.files[fname]
585 return self.opener.read(fn), mode, copied
585 return self.opener.read(fn), mode, copied
586
586
587 def close(self):
587 def close(self):
588 if self.opener:
588 if self.opener:
589 shutil.rmtree(self.opener.base)
589 shutil.rmtree(self.opener.base)
590
590
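A small illustration of the spill-to-disk behaviour of filestore (the sizes are arbitrary): entries that fit under maxsize stay in memory, larger ones are written to a temporary vfs, and close() removes that directory.

store = filestore(maxsize=10)
store.setfile('small', 'abc', (False, False))
store.setfile('large', 'x' * 100, (False, False))   # exceeds maxsize, spilled
assert store.getfile('small') == ('abc', (False, False), None)
data, mode, copied = store.getfile('large')
assert data == 'x' * 100 and copied is None
store.close()                                        # removes the temp dir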
591 class repobackend(abstractbackend):
591 class repobackend(abstractbackend):
592 def __init__(self, ui, repo, ctx, store):
592 def __init__(self, ui, repo, ctx, store):
593 super(repobackend, self).__init__(ui)
593 super(repobackend, self).__init__(ui)
594 self.repo = repo
594 self.repo = repo
595 self.ctx = ctx
595 self.ctx = ctx
596 self.store = store
596 self.store = store
597 self.changed = set()
597 self.changed = set()
598 self.removed = set()
598 self.removed = set()
599 self.copied = {}
599 self.copied = {}
600
600
601 def _checkknown(self, fname):
601 def _checkknown(self, fname):
602 if fname not in self.ctx:
602 if fname not in self.ctx:
603 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
603 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
604
604
605 def getfile(self, fname):
605 def getfile(self, fname):
606 try:
606 try:
607 fctx = self.ctx[fname]
607 fctx = self.ctx[fname]
608 except error.LookupError:
608 except error.LookupError:
609 return None, None
609 return None, None
610 flags = fctx.flags()
610 flags = fctx.flags()
611 return fctx.data(), ('l' in flags, 'x' in flags)
611 return fctx.data(), ('l' in flags, 'x' in flags)
612
612
613 def setfile(self, fname, data, mode, copysource):
613 def setfile(self, fname, data, mode, copysource):
614 if copysource:
614 if copysource:
615 self._checkknown(copysource)
615 self._checkknown(copysource)
616 if data is None:
616 if data is None:
617 data = self.ctx[fname].data()
617 data = self.ctx[fname].data()
618 self.store.setfile(fname, data, mode, copysource)
618 self.store.setfile(fname, data, mode, copysource)
619 self.changed.add(fname)
619 self.changed.add(fname)
620 if copysource:
620 if copysource:
621 self.copied[fname] = copysource
621 self.copied[fname] = copysource
622
622
623 def unlink(self, fname):
623 def unlink(self, fname):
624 self._checkknown(fname)
624 self._checkknown(fname)
625 self.removed.add(fname)
625 self.removed.add(fname)
626
626
627 def exists(self, fname):
627 def exists(self, fname):
628 return fname in self.ctx
628 return fname in self.ctx
629
629
630 def close(self):
630 def close(self):
631 return self.changed | self.removed
631 return self.changed | self.removed
632
632
633 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
633 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
634 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
634 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
635 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
635 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
636 eolmodes = ['strict', 'crlf', 'lf', 'auto']
636 eolmodes = ['strict', 'crlf', 'lf', 'auto']
637
637
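For reference, a couple of hypothetical matches against the hunk-header patterns defined above; a missing length field comes back as None (the "len is 1" case mentioned in the comment).

m = unidesc.match('@@ -12,3 +14,4 @@')
assert m.groups() == ('12', '3', '14', '4')
assert unidesc.match('@@ -1 +1 @@').groups() == ('1', None, '1', None)
assert contextdesc.match('*** 5,9 ****').groups() == ('5', '9')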
638 class patchfile(object):
638 class patchfile(object):
639 def __init__(self, ui, gp, backend, store, eolmode='strict'):
639 def __init__(self, ui, gp, backend, store, eolmode='strict'):
640 self.fname = gp.path
640 self.fname = gp.path
641 self.eolmode = eolmode
641 self.eolmode = eolmode
642 self.eol = None
642 self.eol = None
643 self.backend = backend
643 self.backend = backend
644 self.ui = ui
644 self.ui = ui
645 self.lines = []
645 self.lines = []
646 self.exists = False
646 self.exists = False
647 self.missing = True
647 self.missing = True
648 self.mode = gp.mode
648 self.mode = gp.mode
649 self.copysource = gp.oldpath
649 self.copysource = gp.oldpath
650 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
650 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
651 self.remove = gp.op == 'DELETE'
651 self.remove = gp.op == 'DELETE'
652 if self.copysource is None:
652 if self.copysource is None:
653 data, mode = backend.getfile(self.fname)
653 data, mode = backend.getfile(self.fname)
654 else:
654 else:
655 data, mode = store.getfile(self.copysource)[:2]
655 data, mode = store.getfile(self.copysource)[:2]
656 if data is not None:
656 if data is not None:
657 self.exists = self.copysource is None or backend.exists(self.fname)
657 self.exists = self.copysource is None or backend.exists(self.fname)
658 self.missing = False
658 self.missing = False
659 if data:
659 if data:
660 self.lines = mdiff.splitnewlines(data)
660 self.lines = mdiff.splitnewlines(data)
661 if self.mode is None:
661 if self.mode is None:
662 self.mode = mode
662 self.mode = mode
663 if self.lines:
663 if self.lines:
664 # Normalize line endings
664 # Normalize line endings
665 if self.lines[0].endswith('\r\n'):
665 if self.lines[0].endswith('\r\n'):
666 self.eol = '\r\n'
666 self.eol = '\r\n'
667 elif self.lines[0].endswith('\n'):
667 elif self.lines[0].endswith('\n'):
668 self.eol = '\n'
668 self.eol = '\n'
669 if eolmode != 'strict':
669 if eolmode != 'strict':
670 nlines = []
670 nlines = []
671 for l in self.lines:
671 for l in self.lines:
672 if l.endswith('\r\n'):
672 if l.endswith('\r\n'):
673 l = l[:-2] + '\n'
673 l = l[:-2] + '\n'
674 nlines.append(l)
674 nlines.append(l)
675 self.lines = nlines
675 self.lines = nlines
676 else:
676 else:
677 if self.create:
677 if self.create:
678 self.missing = False
678 self.missing = False
679 if self.mode is None:
679 if self.mode is None:
680 self.mode = (False, False)
680 self.mode = (False, False)
681 if self.missing:
681 if self.missing:
682 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
682 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
683 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
683 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
684 "current directory)\n"))
684 "current directory)\n"))
685
685
686 self.hash = {}
686 self.hash = {}
687 self.dirty = 0
687 self.dirty = 0
688 self.offset = 0
688 self.offset = 0
689 self.skew = 0
689 self.skew = 0
690 self.rej = []
690 self.rej = []
691 self.fileprinted = False
691 self.fileprinted = False
692 self.printfile(False)
692 self.printfile(False)
693 self.hunks = 0
693 self.hunks = 0
694
694
695 def writelines(self, fname, lines, mode):
695 def writelines(self, fname, lines, mode):
696 if self.eolmode == 'auto':
696 if self.eolmode == 'auto':
697 eol = self.eol
697 eol = self.eol
698 elif self.eolmode == 'crlf':
698 elif self.eolmode == 'crlf':
699 eol = '\r\n'
699 eol = '\r\n'
700 else:
700 else:
701 eol = '\n'
701 eol = '\n'
702
702
703 if self.eolmode != 'strict' and eol and eol != '\n':
703 if self.eolmode != 'strict' and eol and eol != '\n':
704 rawlines = []
704 rawlines = []
705 for l in lines:
705 for l in lines:
706 if l and l[-1] == '\n':
706 if l and l[-1] == '\n':
707 l = l[:-1] + eol
707 l = l[:-1] + eol
708 rawlines.append(l)
708 rawlines.append(l)
709 lines = rawlines
709 lines = rawlines
710
710
711 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
711 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
712
712
713 def printfile(self, warn):
713 def printfile(self, warn):
714 if self.fileprinted:
714 if self.fileprinted:
715 return
715 return
716 if warn or self.ui.verbose:
716 if warn or self.ui.verbose:
717 self.fileprinted = True
717 self.fileprinted = True
718 s = _("patching file %s\n") % self.fname
718 s = _("patching file %s\n") % self.fname
719 if warn:
719 if warn:
720 self.ui.warn(s)
720 self.ui.warn(s)
721 else:
721 else:
722 self.ui.note(s)
722 self.ui.note(s)
723
723
724
724
725 def findlines(self, l, linenum):
725 def findlines(self, l, linenum):
726 # looks through the hash and finds candidate lines. The
726 # looks through the hash and finds candidate lines. The
727 # result is a list of line numbers sorted based on distance
727 # result is a list of line numbers sorted based on distance
728 # from linenum
728 # from linenum
729
729
730 cand = self.hash.get(l, [])
730 cand = self.hash.get(l, [])
731 if len(cand) > 1:
731 if len(cand) > 1:
732 # resort our list of potentials forward then back.
732 # resort our list of potentials forward then back.
733 cand.sort(key=lambda x: abs(x - linenum))
733 cand.sort(key=lambda x: abs(x - linenum))
734 return cand
734 return cand
735
735
736 def write_rej(self):
736 def write_rej(self):
737 # our rejects are a little different from patch(1). This always
737 # our rejects are a little different from patch(1). This always
738 # creates rejects in the same form as the original patch. A file
738 # creates rejects in the same form as the original patch. A file
739 # header is inserted so that you can run the reject through patch again
739 # header is inserted so that you can run the reject through patch again
740 # without having to type the filename.
740 # without having to type the filename.
741 if not self.rej:
741 if not self.rej:
742 return
742 return
743 base = os.path.basename(self.fname)
743 base = os.path.basename(self.fname)
744 lines = ["--- %s\n+++ %s\n" % (base, base)]
744 lines = ["--- %s\n+++ %s\n" % (base, base)]
745 for x in self.rej:
745 for x in self.rej:
746 for l in x.hunk:
746 for l in x.hunk:
747 lines.append(l)
747 lines.append(l)
748 if l[-1:] != '\n':
748 if l[-1:] != '\n':
749 lines.append("\n\ No newline at end of file\n")
749 lines.append("\n\ No newline at end of file\n")
750 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
750 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
751
751
752 def apply(self, h):
752 def apply(self, h):
753 if not h.complete():
753 if not h.complete():
754 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
754 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
755 (h.number, h.desc, len(h.a), h.lena, len(h.b),
755 (h.number, h.desc, len(h.a), h.lena, len(h.b),
756 h.lenb))
756 h.lenb))
757
757
758 self.hunks += 1
758 self.hunks += 1
759
759
760 if self.missing:
760 if self.missing:
761 self.rej.append(h)
761 self.rej.append(h)
762 return -1
762 return -1
763
763
764 if self.exists and self.create:
764 if self.exists and self.create:
765 if self.copysource:
765 if self.copysource:
766 self.ui.warn(_("cannot create %s: destination already "
766 self.ui.warn(_("cannot create %s: destination already "
767 "exists\n") % self.fname)
767 "exists\n") % self.fname)
768 else:
768 else:
769 self.ui.warn(_("file %s already exists\n") % self.fname)
769 self.ui.warn(_("file %s already exists\n") % self.fname)
770 self.rej.append(h)
770 self.rej.append(h)
771 return -1
771 return -1
772
772
773 if isinstance(h, binhunk):
773 if isinstance(h, binhunk):
774 if self.remove:
774 if self.remove:
775 self.backend.unlink(self.fname)
775 self.backend.unlink(self.fname)
776 else:
776 else:
777 l = h.new(self.lines)
777 l = h.new(self.lines)
778 self.lines[:] = l
778 self.lines[:] = l
779 self.offset += len(l)
779 self.offset += len(l)
780 self.dirty = True
780 self.dirty = True
781 return 0
781 return 0
782
782
783 horig = h
783 horig = h
784 if (self.eolmode in ('crlf', 'lf')
784 if (self.eolmode in ('crlf', 'lf')
785 or self.eolmode == 'auto' and self.eol):
785 or self.eolmode == 'auto' and self.eol):
786 # If new eols are going to be normalized, then normalize
786 # If new eols are going to be normalized, then normalize
787 # hunk data before patching. Otherwise, preserve input
787 # hunk data before patching. Otherwise, preserve input
788 # line-endings.
788 # line-endings.
789 h = h.getnormalized()
789 h = h.getnormalized()
790
790
791 # fast case first, no offsets, no fuzz
791 # fast case first, no offsets, no fuzz
792 old, oldstart, new, newstart = h.fuzzit(0, False)
792 old, oldstart, new, newstart = h.fuzzit(0, False)
793 oldstart += self.offset
793 oldstart += self.offset
794 orig_start = oldstart
794 orig_start = oldstart
795 # if there's skew we want to emit the "(offset %d lines)" even
795 # if there's skew we want to emit the "(offset %d lines)" even
796 # when the hunk cleanly applies at start + skew, so skip the
796 # when the hunk cleanly applies at start + skew, so skip the
797 # fast case code
797 # fast case code
798 if (self.skew == 0 and
798 if (self.skew == 0 and
799 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
799 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
800 if self.remove:
800 if self.remove:
801 self.backend.unlink(self.fname)
801 self.backend.unlink(self.fname)
802 else:
802 else:
803 self.lines[oldstart:oldstart + len(old)] = new
803 self.lines[oldstart:oldstart + len(old)] = new
804 self.offset += len(new) - len(old)
804 self.offset += len(new) - len(old)
805 self.dirty = True
805 self.dirty = True
806 return 0
806 return 0
807
807
808 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
808 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
809 self.hash = {}
809 self.hash = {}
810 for x, s in enumerate(self.lines):
810 for x, s in enumerate(self.lines):
811 self.hash.setdefault(s, []).append(x)
811 self.hash.setdefault(s, []).append(x)
812
812
813 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
813 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
814 for toponly in [True, False]:
814 for toponly in [True, False]:
815 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
815 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
816 oldstart = oldstart + self.offset + self.skew
816 oldstart = oldstart + self.offset + self.skew
817 oldstart = min(oldstart, len(self.lines))
817 oldstart = min(oldstart, len(self.lines))
818 if old:
818 if old:
819 cand = self.findlines(old[0][1:], oldstart)
819 cand = self.findlines(old[0][1:], oldstart)
820 else:
820 else:
821 # Only adding lines with no or fuzzed context, just
821 # Only adding lines with no or fuzzed context, just
822 # take the skew in account
822 # take the skew in account
823 cand = [oldstart]
823 cand = [oldstart]
824
824
825 for l in cand:
825 for l in cand:
826 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
826 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
827 self.lines[l : l + len(old)] = new
827 self.lines[l : l + len(old)] = new
828 self.offset += len(new) - len(old)
828 self.offset += len(new) - len(old)
829 self.skew = l - orig_start
829 self.skew = l - orig_start
830 self.dirty = True
830 self.dirty = True
831 offset = l - orig_start - fuzzlen
831 offset = l - orig_start - fuzzlen
832 if fuzzlen:
832 if fuzzlen:
833 msg = _("Hunk #%d succeeded at %d "
833 msg = _("Hunk #%d succeeded at %d "
834 "with fuzz %d "
834 "with fuzz %d "
835 "(offset %d lines).\n")
835 "(offset %d lines).\n")
836 self.printfile(True)
836 self.printfile(True)
837 self.ui.warn(msg %
837 self.ui.warn(msg %
838 (h.number, l + 1, fuzzlen, offset))
838 (h.number, l + 1, fuzzlen, offset))
839 else:
839 else:
840 msg = _("Hunk #%d succeeded at %d "
840 msg = _("Hunk #%d succeeded at %d "
841 "(offset %d lines).\n")
841 "(offset %d lines).\n")
842 self.ui.note(msg % (h.number, l + 1, offset))
842 self.ui.note(msg % (h.number, l + 1, offset))
843 return fuzzlen
843 return fuzzlen
844 self.printfile(True)
844 self.printfile(True)
845 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
845 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
846 self.rej.append(horig)
846 self.rej.append(horig)
847 return -1
847 return -1
848
848
849 def close(self):
849 def close(self):
850 if self.dirty:
850 if self.dirty:
851 self.writelines(self.fname, self.lines, self.mode)
851 self.writelines(self.fname, self.lines, self.mode)
852 self.write_rej()
852 self.write_rej()
853 return len(self.rej)
853 return len(self.rej)
854
854
855 class header(object):
855 class header(object):
856 """patch header
856 """patch header
857 """
857 """
858 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
858 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
859 diff_re = re.compile('diff -r .* (.*)$')
859 diff_re = re.compile('diff -r .* (.*)$')
860 allhunks_re = re.compile('(?:index|deleted file) ')
860 allhunks_re = re.compile('(?:index|deleted file) ')
861 pretty_re = re.compile('(?:new file|deleted file) ')
861 pretty_re = re.compile('(?:new file|deleted file) ')
862 special_re = re.compile('(?:index|deleted|copy|rename) ')
862 special_re = re.compile('(?:index|deleted|copy|rename) ')
863 newfile_re = re.compile('(?:new file)')
863 newfile_re = re.compile('(?:new file)')
864
864
865 def __init__(self, header):
865 def __init__(self, header):
866 self.header = header
866 self.header = header
867 self.hunks = []
867 self.hunks = []
868
868
869 def binary(self):
869 def binary(self):
870 return any(h.startswith('index ') for h in self.header)
870 return any(h.startswith('index ') for h in self.header)
871
871
872 def pretty(self, fp):
872 def pretty(self, fp):
873 for h in self.header:
873 for h in self.header:
874 if h.startswith('index '):
874 if h.startswith('index '):
875 fp.write(_('this modifies a binary file (all or nothing)\n'))
875 fp.write(_('this modifies a binary file (all or nothing)\n'))
876 break
876 break
877 if self.pretty_re.match(h):
877 if self.pretty_re.match(h):
878 fp.write(h)
878 fp.write(h)
879 if self.binary():
879 if self.binary():
880 fp.write(_('this is a binary file\n'))
880 fp.write(_('this is a binary file\n'))
881 break
881 break
882 if h.startswith('---'):
882 if h.startswith('---'):
883 fp.write(_('%d hunks, %d lines changed\n') %
883 fp.write(_('%d hunks, %d lines changed\n') %
884 (len(self.hunks),
884 (len(self.hunks),
885 sum([max(h.added, h.removed) for h in self.hunks])))
885 sum([max(h.added, h.removed) for h in self.hunks])))
886 break
886 break
887 fp.write(h)
887 fp.write(h)
888
888
889 def write(self, fp):
889 def write(self, fp):
890 fp.write(''.join(self.header))
890 fp.write(''.join(self.header))
891
891
892 def allhunks(self):
892 def allhunks(self):
893 return any(self.allhunks_re.match(h) for h in self.header)
893 return any(self.allhunks_re.match(h) for h in self.header)
894
894
895 def files(self):
895 def files(self):
896 match = self.diffgit_re.match(self.header[0])
896 match = self.diffgit_re.match(self.header[0])
897 if match:
897 if match:
898 fromfile, tofile = match.groups()
898 fromfile, tofile = match.groups()
899 if fromfile == tofile:
899 if fromfile == tofile:
900 return [fromfile]
900 return [fromfile]
901 return [fromfile, tofile]
901 return [fromfile, tofile]
902 else:
902 else:
903 return self.diff_re.match(self.header[0]).groups()
903 return self.diff_re.match(self.header[0]).groups()
904
904
905 def filename(self):
905 def filename(self):
906 return self.files()[-1]
906 return self.files()[-1]
907
907
908 def __repr__(self):
908 def __repr__(self):
909 return '<header %s>' % (' '.join(map(repr, self.files())))
909 return '<header %s>' % (' '.join(map(repr, self.files())))
910
910
911 def isnewfile(self):
911 def isnewfile(self):
912 return any(self.newfile_re.match(h) for h in self.header)
912 return any(self.newfile_re.match(h) for h in self.header)
913
913
914 def special(self):
914 def special(self):
915 # Special files are shown only at the header level and not at the hunk
915 # Special files are shown only at the header level and not at the hunk
916 # level for example a file that has been deleted is a special file.
916 # level for example a file that has been deleted is a special file.
917 # The user cannot change the content of the operation, in the case of
917 # The user cannot change the content of the operation, in the case of
918 # the deleted file he has to take the deletion or not take it, he
918 # the deleted file he has to take the deletion or not take it, he
919 # cannot take some of it.
919 # cannot take some of it.
920 # Newly added files are special if they are empty, they are not special
920 # Newly added files are special if they are empty, they are not special
921 # if they have some content as we want to be able to change it
921 # if they have some content as we want to be able to change it
922 nocontent = len(self.header) == 2
922 nocontent = len(self.header) == 2
923 emptynewfile = self.isnewfile() and nocontent
923 emptynewfile = self.isnewfile() and nocontent
924 return emptynewfile or \
924 return emptynewfile or \
925 any(self.special_re.match(h) for h in self.header)
925 any(self.special_re.match(h) for h in self.header)
926
926
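A hypothetical header instance, to show what files(), filename(), binary() and allhunks() key off (the index line content is made up):

h = header(['diff --git a/foo.c b/foo.c\n',
            'index 0000000..1111111 100644\n'])
assert h.files() == ['foo.c']          # same name on both sides
assert h.filename() == 'foo.c'
assert h.binary() and h.allhunks()     # both key off the 'index ' line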
927 class recordhunk(object):
927 class recordhunk(object):
928 """patch hunk
928 """patch hunk
929
929
930 XXX shouldn't we merge this with the other hunk class?
930 XXX shouldn't we merge this with the other hunk class?
931 """
931 """
932
932
933 def __init__(self, header, fromline, toline, proc, before, hunk, after,
933 def __init__(self, header, fromline, toline, proc, before, hunk, after,
934 maxcontext=None):
934 maxcontext=None):
935 def trimcontext(lines, reverse=False):
935 def trimcontext(lines, reverse=False):
936 if maxcontext is not None:
936 if maxcontext is not None:
937 delta = len(lines) - maxcontext
937 delta = len(lines) - maxcontext
938 if delta > 0:
938 if delta > 0:
939 if reverse:
939 if reverse:
940 return delta, lines[delta:]
940 return delta, lines[delta:]
941 else:
941 else:
942 return delta, lines[:maxcontext]
942 return delta, lines[:maxcontext]
943 return 0, lines
943 return 0, lines
944
944
945 self.header = header
945 self.header = header
946 trimedbefore, self.before = trimcontext(before, True)
946 trimedbefore, self.before = trimcontext(before, True)
947 self.fromline = fromline + trimedbefore
947 self.fromline = fromline + trimedbefore
948 self.toline = toline + trimedbefore
948 self.toline = toline + trimedbefore
949 _trimedafter, self.after = trimcontext(after, False)
949 _trimedafter, self.after = trimcontext(after, False)
950 self.proc = proc
950 self.proc = proc
951 self.hunk = hunk
951 self.hunk = hunk
952 self.added, self.removed = self.countchanges(self.hunk)
952 self.added, self.removed = self.countchanges(self.hunk)
953
953
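The maxcontext trimming in __init__ above is easiest to see in isolation. Below is a small standalone sketch (not Mercurial's API): context is cut down to maxcontext, and the number of leading lines actually dropped is reported so fromline/toline can be shifted, which is exactly what the trimedbefore bookkeeping does.

def trimcontext(lines, maxcontext, trim_leading=False):
    # drop surplus context; report how many lines were removed from the top
    if maxcontext is not None:
        delta = len(lines) - maxcontext
        if delta > 0:
            if trim_leading:
                return delta, lines[delta:]    # cut from the top ('before')
            return delta, lines[:maxcontext]   # cut from the bottom ('after')
    return 0, lines

before = [' ctx1\n', ' ctx2\n', ' ctx3\n']
dropped, trimmed = trimcontext(before, 1, trim_leading=True)
assert (dropped, trimmed) == (2, [' ctx3\n'])
# fromline and toline would both move forward by `dropped`, as above.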
954 def __eq__(self, v):
954 def __eq__(self, v):
955 if not isinstance(v, recordhunk):
955 if not isinstance(v, recordhunk):
956 return False
956 return False
957
957
958 return ((v.hunk == self.hunk) and
958 return ((v.hunk == self.hunk) and
959 (v.proc == self.proc) and
959 (v.proc == self.proc) and
960 (self.fromline == v.fromline) and
960 (self.fromline == v.fromline) and
961 (self.header.files() == v.header.files()))
961 (self.header.files() == v.header.files()))
962
962
963 def __hash__(self):
963 def __hash__(self):
964 return hash((tuple(self.hunk),
964 return hash((tuple(self.hunk),
965 tuple(self.header.files()),
965 tuple(self.header.files()),
966 self.fromline,
966 self.fromline,
967 self.proc))
967 self.proc))
968
968
969 def countchanges(self, hunk):
969 def countchanges(self, hunk):
970 """hunk -> (n+,n-)"""
970 """hunk -> (n+,n-)"""
971 add = len([h for h in hunk if h.startswith('+')])
971 add = len([h for h in hunk if h.startswith('+')])
972 rem = len([h for h in hunk if h.startswith('-')])
972 rem = len([h for h in hunk if h.startswith('-')])
973 return add, rem
973 return add, rem
974
974
975 def reversehunk(self):
975 def reversehunk(self):
976 """return another recordhunk which is the reverse of the hunk
976 """return another recordhunk which is the reverse of the hunk
977
977
978 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
978 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
979 that, swap fromline/toline and +/- signs while keeping other things
979 that, swap fromline/toline and +/- signs while keeping other things
980 unchanged.
980 unchanged.
981 """
981 """
982 m = {'+': '-', '-': '+', '\\': '\\'}
982 m = {'+': '-', '-': '+', '\\': '\\'}
983 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
983 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
984 return recordhunk(self.header, self.toline, self.fromline, self.proc,
984 return recordhunk(self.header, self.toline, self.fromline, self.proc,
985 self.before, hunk, self.after)
985 self.before, hunk, self.after)
986
986
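The sign swap performed by reversehunk() is worth seeing on its own. A minimal sketch with made-up hunk lines; recordhunk.hunk holds only '+', '-' and '\' lines, since the surrounding context lives in the before/after lists and needs no rewriting.

signmap = {'+': '-', '-': '+', '\\': '\\'}

def reverselines(hunklines):
    # swap the leading control character, keep the line payload untouched
    return ['%s%s' % (signmap[l[0:1]], l[1:]) for l in hunklines]

forward = ['-old line\n', '+new line\n']
assert reverselines(forward) == ['+old line\n', '-new line\n']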
987 def write(self, fp):
987 def write(self, fp):
988 delta = len(self.before) + len(self.after)
988 delta = len(self.before) + len(self.after)
989 if self.after and self.after[-1] == '\\ No newline at end of file\n':
989 if self.after and self.after[-1] == '\\ No newline at end of file\n':
990 delta -= 1
990 delta -= 1
991 fromlen = delta + self.removed
991 fromlen = delta + self.removed
992 tolen = delta + self.added
992 tolen = delta + self.added
993 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
993 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
994 (self.fromline, fromlen, self.toline, tolen,
994 (self.fromline, fromlen, self.toline, tolen,
995 self.proc and (' ' + self.proc)))
995 self.proc and (' ' + self.proc)))
996 fp.write(''.join(self.before + self.hunk + self.after))
996 fp.write(''.join(self.before + self.hunk + self.after))
997
997
998 pretty = write
998 pretty = write
999
999
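The range arithmetic in write() can be restated compactly: both sides of the '@@' header share the context line count, then the old side adds the removed lines and the new side adds the added lines; a trailing no-newline marker is not counted as context. A sketch under the same counting rules as above:

def hunkheader(fromline, toline, before, hunk, after, proc=''):
    context = len(before) + len(after)
    if after and after[-1] == '\\ No newline at end of file\n':
        context -= 1                 # the marker is not a real context line
    removed = sum(1 for l in hunk if l.startswith('-'))
    added = sum(1 for l in hunk if l.startswith('+'))
    return '@@ -%d,%d +%d,%d @@%s\n' % (fromline, context + removed,
                                        toline, context + added,
                                        proc and (' ' + proc))

assert hunkheader(3, 3, [' a\n'], ['-b\n', '+B\n', '+C\n'], [' c\n']) == \
    '@@ -3,3 +3,4 @@\n'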
1000 def filename(self):
1000 def filename(self):
1001 return self.header.filename()
1001 return self.header.filename()
1002
1002
1003 def __repr__(self):
1003 def __repr__(self):
1004 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1004 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1005
1005
1006 def getmessages():
1006 def getmessages():
1007 return {
1007 return {
1008 'multiple': {
1008 'multiple': {
1009 'apply': _("apply change %d/%d to '%s'?"),
1009 'apply': _("apply change %d/%d to '%s'?"),
1010 'discard': _("discard change %d/%d to '%s'?"),
1010 'discard': _("discard change %d/%d to '%s'?"),
1011 'record': _("record change %d/%d to '%s'?"),
1011 'record': _("record change %d/%d to '%s'?"),
1012 },
1012 },
1013 'single': {
1013 'single': {
1014 'apply': _("apply this change to '%s'?"),
1014 'apply': _("apply this change to '%s'?"),
1015 'discard': _("discard this change to '%s'?"),
1015 'discard': _("discard this change to '%s'?"),
1016 'record': _("record this change to '%s'?"),
1016 'record': _("record this change to '%s'?"),
1017 },
1017 },
1018 'help': {
1018 'help': {
1019 'apply': _('[Ynesfdaq?]'
1019 'apply': _('[Ynesfdaq?]'
1020 '$$ &Yes, apply this change'
1020 '$$ &Yes, apply this change'
1021 '$$ &No, skip this change'
1021 '$$ &No, skip this change'
1022 '$$ &Edit this change manually'
1022 '$$ &Edit this change manually'
1023 '$$ &Skip remaining changes to this file'
1023 '$$ &Skip remaining changes to this file'
1024 '$$ Apply remaining changes to this &file'
1024 '$$ Apply remaining changes to this &file'
1025 '$$ &Done, skip remaining changes and files'
1025 '$$ &Done, skip remaining changes and files'
1026 '$$ Apply &all changes to all remaining files'
1026 '$$ Apply &all changes to all remaining files'
1027 '$$ &Quit, applying no changes'
1027 '$$ &Quit, applying no changes'
1028 '$$ &? (display help)'),
1028 '$$ &? (display help)'),
1029 'discard': _('[Ynesfdaq?]'
1029 'discard': _('[Ynesfdaq?]'
1030 '$$ &Yes, discard this change'
1030 '$$ &Yes, discard this change'
1031 '$$ &No, skip this change'
1031 '$$ &No, skip this change'
1032 '$$ &Edit this change manually'
1032 '$$ &Edit this change manually'
1033 '$$ &Skip remaining changes to this file'
1033 '$$ &Skip remaining changes to this file'
1034 '$$ Discard remaining changes to this &file'
1034 '$$ Discard remaining changes to this &file'
1035 '$$ &Done, skip remaining changes and files'
1035 '$$ &Done, skip remaining changes and files'
1036 '$$ Discard &all changes to all remaining files'
1036 '$$ Discard &all changes to all remaining files'
1037 '$$ &Quit, discarding no changes'
1037 '$$ &Quit, discarding no changes'
1038 '$$ &? (display help)'),
1038 '$$ &? (display help)'),
1039 'record': _('[Ynesfdaq?]'
1039 'record': _('[Ynesfdaq?]'
1040 '$$ &Yes, record this change'
1040 '$$ &Yes, record this change'
1041 '$$ &No, skip this change'
1041 '$$ &No, skip this change'
1042 '$$ &Edit this change manually'
1042 '$$ &Edit this change manually'
1043 '$$ &Skip remaining changes to this file'
1043 '$$ &Skip remaining changes to this file'
1044 '$$ Record remaining changes to this &file'
1044 '$$ Record remaining changes to this &file'
1045 '$$ &Done, skip remaining changes and files'
1045 '$$ &Done, skip remaining changes and files'
1046 '$$ Record &all changes to all remaining files'
1046 '$$ Record &all changes to all remaining files'
1047 '$$ &Quit, recording no changes'
1047 '$$ &Quit, recording no changes'
1048 '$$ &? (display help)'),
1048 '$$ &? (display help)'),
1049 }
1049 }
1050 }
1050 }
1051
1051
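The '$$'-separated help strings above follow the convention consumed by ui.promptchoice()/ui.extractchoices(): the first segment is the keypress summary and each following segment is one choice, with '&' marking the response key. The helper below is a rough standalone illustration of that parsing, not Mercurial's actual implementation.

def extractchoices(prompt):
    # hypothetical stand-in for ui.extractchoices(): split the prompt into
    # the question and a list of (keypress, label) pairs
    parts = [p.strip() for p in prompt.split('$$')]
    question, choices = parts[0], []
    for entry in parts[1:]:
        key = entry[entry.index('&') + 1].lower()   # letter after '&'
        choices.append((key, entry.replace('&', '', 1)))
    return question, choices

q, choices = extractchoices('[Ynesfdaq?]'
                            '$$ &Yes, record this change'
                            '$$ &No, skip this change')
assert q == '[Ynesfdaq?]'
assert choices[0] == ('y', 'Yes, record this change')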
1052 def filterpatch(ui, headers, operation=None):
1052 def filterpatch(ui, headers, operation=None):
1053 """Interactively filter patch chunks into applied-only chunks"""
1053 """Interactively filter patch chunks into applied-only chunks"""
1054 messages = getmessages()
1054 messages = getmessages()
1055
1055
1056 if operation is None:
1056 if operation is None:
1057 operation = 'record'
1057 operation = 'record'
1058
1058
1059 def prompt(skipfile, skipall, query, chunk):
1059 def prompt(skipfile, skipall, query, chunk):
1060 """prompt query, and process base inputs
1060 """prompt query, and process base inputs
1061
1061
1062 - y/n for the rest of the current file
1062 - y/n for the rest of the current file
1063 - y/n for all remaining changes
1063 - y/n for all remaining changes
1064 - ? (help)
1064 - ? (help)
1065 - q (quit)
1065 - q (quit)
1066
1066
1067 Return True/False and possibly updated skipfile and skipall.
1067 Return True/False and possibly updated skipfile and skipall.
1068 """
1068 """
1069 newpatches = None
1069 newpatches = None
1070 if skipall is not None:
1070 if skipall is not None:
1071 return skipall, skipfile, skipall, newpatches
1071 return skipall, skipfile, skipall, newpatches
1072 if skipfile is not None:
1072 if skipfile is not None:
1073 return skipfile, skipfile, skipall, newpatches
1073 return skipfile, skipfile, skipall, newpatches
1074 while True:
1074 while True:
1075 resps = messages['help'][operation]
1075 resps = messages['help'][operation]
1076 r = ui.promptchoice("%s %s" % (query, resps))
1076 r = ui.promptchoice("%s %s" % (query, resps))
1077 ui.write("\n")
1077 ui.write("\n")
1078 if r == 8: # ?
1078 if r == 8: # ?
1079 for c, t in ui.extractchoices(resps)[1]:
1079 for c, t in ui.extractchoices(resps)[1]:
1080 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1080 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1081 continue
1081 continue
1082 elif r == 0: # yes
1082 elif r == 0: # yes
1083 ret = True
1083 ret = True
1084 elif r == 1: # no
1084 elif r == 1: # no
1085 ret = False
1085 ret = False
1086 elif r == 2: # Edit patch
1086 elif r == 2: # Edit patch
1087 if chunk is None:
1087 if chunk is None:
1088 ui.write(_('cannot edit patch for whole file'))
1088 ui.write(_('cannot edit patch for whole file'))
1089 ui.write("\n")
1089 ui.write("\n")
1090 continue
1090 continue
1091 if chunk.header.binary():
1091 if chunk.header.binary():
1092 ui.write(_('cannot edit patch for binary file'))
1092 ui.write(_('cannot edit patch for binary file'))
1093 ui.write("\n")
1093 ui.write("\n")
1094 continue
1094 continue
1095 # Patch comment based on the Git one (based on comment at end of
1095 # Patch comment based on the Git one (based on comment at end of
1096 # https://mercurial-scm.org/wiki/RecordExtension)
1096 # https://mercurial-scm.org/wiki/RecordExtension)
1097 phelp = '---' + _("""
1097 phelp = '---' + _("""
1098 To remove '-' lines, make them ' ' lines (context).
1098 To remove '-' lines, make them ' ' lines (context).
1099 To remove '+' lines, delete them.
1099 To remove '+' lines, delete them.
1100 Lines starting with # will be removed from the patch.
1100 Lines starting with # will be removed from the patch.
1101
1101
1102 If the patch applies cleanly, the edited hunk will immediately be
1102 If the patch applies cleanly, the edited hunk will immediately be
1103 added to the record list. If it does not apply cleanly, a rejects
1103 added to the record list. If it does not apply cleanly, a rejects
1104 file will be generated: you can use that when you try again. If
1104 file will be generated: you can use that when you try again. If
1105 all lines of the hunk are removed, then the edit is aborted and
1105 all lines of the hunk are removed, then the edit is aborted and
1106 the hunk is left unchanged.
1106 the hunk is left unchanged.
1107 """)
1107 """)
1108 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1108 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1109 suffix=".diff")
1109 suffix=".diff")
1110 ncpatchfp = None
1110 ncpatchfp = None
1111 try:
1111 try:
1112 # Write the initial patch
1112 # Write the initial patch
1113 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1113 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1114 chunk.header.write(f)
1114 chunk.header.write(f)
1115 chunk.write(f)
1115 chunk.write(f)
1116 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1116 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1117 f.close()
1117 f.close()
1118 # Start the editor and wait for it to complete
1118 # Start the editor and wait for it to complete
1119 editor = ui.geteditor()
1119 editor = ui.geteditor()
1120 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1120 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1121 environ={'HGUSER': ui.username()},
1121 environ={'HGUSER': ui.username()},
1122 blockedtag='filterpatch')
1122 blockedtag='filterpatch')
1123 if ret != 0:
1123 if ret != 0:
1124 ui.warn(_("editor exited with exit code %d\n") % ret)
1124 ui.warn(_("editor exited with exit code %d\n") % ret)
1125 continue
1125 continue
1126 # Remove comment lines
1126 # Remove comment lines
1127 patchfp = open(patchfn, r'rb')
1127 patchfp = open(patchfn, r'rb')
1128 ncpatchfp = stringio()
1128 ncpatchfp = stringio()
1129 for line in util.iterfile(patchfp):
1129 for line in util.iterfile(patchfp):
1130 line = util.fromnativeeol(line)
1130 line = util.fromnativeeol(line)
1131 if not line.startswith('#'):
1131 if not line.startswith('#'):
1132 ncpatchfp.write(line)
1132 ncpatchfp.write(line)
1133 patchfp.close()
1133 patchfp.close()
1134 ncpatchfp.seek(0)
1134 ncpatchfp.seek(0)
1135 newpatches = parsepatch(ncpatchfp)
1135 newpatches = parsepatch(ncpatchfp)
1136 finally:
1136 finally:
1137 os.unlink(patchfn)
1137 os.unlink(patchfn)
1138 del ncpatchfp
1138 del ncpatchfp
1139 # Signal that the chunk shouldn't be applied as-is, but
1139 # Signal that the chunk shouldn't be applied as-is, but
1140 # provide the new patch to be used instead.
1140 # provide the new patch to be used instead.
1141 ret = False
1141 ret = False
1142 elif r == 3: # Skip
1142 elif r == 3: # Skip
1143 ret = skipfile = False
1143 ret = skipfile = False
1144 elif r == 4: # file (Record remaining)
1144 elif r == 4: # file (Record remaining)
1145 ret = skipfile = True
1145 ret = skipfile = True
1146 elif r == 5: # done, skip remaining
1146 elif r == 5: # done, skip remaining
1147 ret = skipall = False
1147 ret = skipall = False
1148 elif r == 6: # all
1148 elif r == 6: # all
1149 ret = skipall = True
1149 ret = skipall = True
1150 elif r == 7: # quit
1150 elif r == 7: # quit
1151 raise error.Abort(_('user quit'))
1151 raise error.Abort(_('user quit'))
1152 return ret, skipfile, skipall, newpatches
1152 return ret, skipfile, skipall, newpatches
1153
1153
1154 seen = set()
1154 seen = set()
1155 applied = {} # 'filename' -> [] of chunks
1155 applied = {} # 'filename' -> [] of chunks
1156 skipfile, skipall = None, None
1156 skipfile, skipall = None, None
1157 pos, total = 1, sum(len(h.hunks) for h in headers)
1157 pos, total = 1, sum(len(h.hunks) for h in headers)
1158 for h in headers:
1158 for h in headers:
1159 pos += len(h.hunks)
1159 pos += len(h.hunks)
1160 skipfile = None
1160 skipfile = None
1161 fixoffset = 0
1161 fixoffset = 0
1162 hdr = ''.join(h.header)
1162 hdr = ''.join(h.header)
1163 if hdr in seen:
1163 if hdr in seen:
1164 continue
1164 continue
1165 seen.add(hdr)
1165 seen.add(hdr)
1166 if skipall is None:
1166 if skipall is None:
1167 h.pretty(ui)
1167 h.pretty(ui)
1168 msg = (_('examine changes to %s?') %
1168 msg = (_('examine changes to %s?') %
1169 _(' and ').join("'%s'" % f for f in h.files()))
1169 _(' and ').join("'%s'" % f for f in h.files()))
1170 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1170 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1171 if not r:
1171 if not r:
1172 continue
1172 continue
1173 applied[h.filename()] = [h]
1173 applied[h.filename()] = [h]
1174 if h.allhunks():
1174 if h.allhunks():
1175 applied[h.filename()] += h.hunks
1175 applied[h.filename()] += h.hunks
1176 continue
1176 continue
1177 for i, chunk in enumerate(h.hunks):
1177 for i, chunk in enumerate(h.hunks):
1178 if skipfile is None and skipall is None:
1178 if skipfile is None and skipall is None:
1179 chunk.pretty(ui)
1179 chunk.pretty(ui)
1180 if total == 1:
1180 if total == 1:
1181 msg = messages['single'][operation] % chunk.filename()
1181 msg = messages['single'][operation] % chunk.filename()
1182 else:
1182 else:
1183 idx = pos - len(h.hunks) + i
1183 idx = pos - len(h.hunks) + i
1184 msg = messages['multiple'][operation] % (idx, total,
1184 msg = messages['multiple'][operation] % (idx, total,
1185 chunk.filename())
1185 chunk.filename())
1186 r, skipfile, skipall, newpatches = prompt(skipfile,
1186 r, skipfile, skipall, newpatches = prompt(skipfile,
1187 skipall, msg, chunk)
1187 skipall, msg, chunk)
1188 if r:
1188 if r:
1189 if fixoffset:
1189 if fixoffset:
1190 chunk = copy.copy(chunk)
1190 chunk = copy.copy(chunk)
1191 chunk.toline += fixoffset
1191 chunk.toline += fixoffset
1192 applied[chunk.filename()].append(chunk)
1192 applied[chunk.filename()].append(chunk)
1193 elif newpatches is not None:
1193 elif newpatches is not None:
1194 for newpatch in newpatches:
1194 for newpatch in newpatches:
1195 for newhunk in newpatch.hunks:
1195 for newhunk in newpatch.hunks:
1196 if fixoffset:
1196 if fixoffset:
1197 newhunk.toline += fixoffset
1197 newhunk.toline += fixoffset
1198 applied[newhunk.filename()].append(newhunk)
1198 applied[newhunk.filename()].append(newhunk)
1199 else:
1199 else:
1200 fixoffset += chunk.removed - chunk.added
1200 fixoffset += chunk.removed - chunk.added
1201 return (sum([h for h in applied.itervalues()
1201 return (sum([h for h in applied.itervalues()
1202 if h[0].special() or len(h) > 1], []), {})
1202 if h[0].special() or len(h) > 1], []), {})
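The fixoffset bookkeeping near the end of filterpatch() compensates for hunks the user skipped: a skipped hunk's additions and removals never happen, so the target line numbers of every later hunk in the same file must shift by (removed - added). A purely numeric sketch with hypothetical hunk counters:

hunks = [
    {'toline': 10, 'added': 2, 'removed': 0, 'selected': False},
    {'toline': 40, 'added': 1, 'removed': 1, 'selected': True},
]

fixoffset = 0
kept = []
for h in hunks:
    if h['selected']:
        kept.append(dict(h, toline=h['toline'] + fixoffset))
    else:
        # the skipped hunk's 2 additions never land, so later hunks
        # start 2 lines earlier in the target file
        fixoffset += h['removed'] - h['added']

assert kept[0]['toline'] == 38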
1203 class hunk(object):
1203 class hunk(object):
1204 def __init__(self, desc, num, lr, context):
1204 def __init__(self, desc, num, lr, context):
1205 self.number = num
1205 self.number = num
1206 self.desc = desc
1206 self.desc = desc
1207 self.hunk = [desc]
1207 self.hunk = [desc]
1208 self.a = []
1208 self.a = []
1209 self.b = []
1209 self.b = []
1210 self.starta = self.lena = None
1210 self.starta = self.lena = None
1211 self.startb = self.lenb = None
1211 self.startb = self.lenb = None
1212 if lr is not None:
1212 if lr is not None:
1213 if context:
1213 if context:
1214 self.read_context_hunk(lr)
1214 self.read_context_hunk(lr)
1215 else:
1215 else:
1216 self.read_unified_hunk(lr)
1216 self.read_unified_hunk(lr)
1217
1217
1218 def getnormalized(self):
1218 def getnormalized(self):
1219 """Return a copy with line endings normalized to LF."""
1219 """Return a copy with line endings normalized to LF."""
1220
1220
1221 def normalize(lines):
1221 def normalize(lines):
1222 nlines = []
1222 nlines = []
1223 for line in lines:
1223 for line in lines:
1224 if line.endswith('\r\n'):
1224 if line.endswith('\r\n'):
1225 line = line[:-2] + '\n'
1225 line = line[:-2] + '\n'
1226 nlines.append(line)
1226 nlines.append(line)
1227 return nlines
1227 return nlines
1228
1228
1229 # Dummy object, it is rebuilt manually
1229 # Dummy object, it is rebuilt manually
1230 nh = hunk(self.desc, self.number, None, None)
1230 nh = hunk(self.desc, self.number, None, None)
1231 nh.number = self.number
1231 nh.number = self.number
1232 nh.desc = self.desc
1232 nh.desc = self.desc
1233 nh.hunk = self.hunk
1233 nh.hunk = self.hunk
1234 nh.a = normalize(self.a)
1234 nh.a = normalize(self.a)
1235 nh.b = normalize(self.b)
1235 nh.b = normalize(self.b)
1236 nh.starta = self.starta
1236 nh.starta = self.starta
1237 nh.startb = self.startb
1237 nh.startb = self.startb
1238 nh.lena = self.lena
1238 nh.lena = self.lena
1239 nh.lenb = self.lenb
1239 nh.lenb = self.lenb
1240 return nh
1240 return nh
1241
1241
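getnormalized() only rewrites a trailing CRLF; everything else in the line, including interior carriage returns, is preserved. The normalization rule in isolation, as a sketch:

def normalize(lines):
    # turn a trailing '\r\n' into '\n', leave other lines alone
    return [l[:-2] + '\n' if l.endswith('\r\n') else l for l in lines]

assert normalize(['keep\n', 'fix\r\n']) == ['keep\n', 'fix\n']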
1242 def read_unified_hunk(self, lr):
1242 def read_unified_hunk(self, lr):
1243 m = unidesc.match(self.desc)
1243 m = unidesc.match(self.desc)
1244 if not m:
1244 if not m:
1245 raise PatchError(_("bad hunk #%d") % self.number)
1245 raise PatchError(_("bad hunk #%d") % self.number)
1246 self.starta, self.lena, self.startb, self.lenb = m.groups()
1246 self.starta, self.lena, self.startb, self.lenb = m.groups()
1247 if self.lena is None:
1247 if self.lena is None:
1248 self.lena = 1
1248 self.lena = 1
1249 else:
1249 else:
1250 self.lena = int(self.lena)
1250 self.lena = int(self.lena)
1251 if self.lenb is None:
1251 if self.lenb is None:
1252 self.lenb = 1
1252 self.lenb = 1
1253 else:
1253 else:
1254 self.lenb = int(self.lenb)
1254 self.lenb = int(self.lenb)
1255 self.starta = int(self.starta)
1255 self.starta = int(self.starta)
1256 self.startb = int(self.startb)
1256 self.startb = int(self.startb)
1257 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1257 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1258 self.b)
1258 self.b)
1259 # if we hit EOF before finishing out the hunk, the last line will
1259 # if we hit EOF before finishing out the hunk, the last line will
1260 # be zero length. Let's try to fix it up.
1260 # be zero length. Let's try to fix it up.
1261 while len(self.hunk[-1]) == 0:
1261 while len(self.hunk[-1]) == 0:
1262 del self.hunk[-1]
1262 del self.hunk[-1]
1263 del self.a[-1]
1263 del self.a[-1]
1264 del self.b[-1]
1264 del self.b[-1]
1265 self.lena -= 1
1265 self.lena -= 1
1266 self.lenb -= 1
1266 self.lenb -= 1
1267 self._fixnewline(lr)
1267 self._fixnewline(lr)
1268
1268
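read_unified_hunk() leans on the unidesc pattern defined earlier in this module (not shown here). The sketch below uses an approximation of that pattern to show the one subtlety handled above: a missing length in '@@ -start[,len] +start[,len] @@' defaults to 1.

import re

# an approximation of unidesc, assumed here for illustration only
unified_re = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')

def parserange(desc):
    m = unified_re.match(desc)
    if not m:
        raise ValueError('not a unified hunk header: %r' % desc)
    starta, lena, startb, lenb = m.groups()
    return (int(starta), int(lena) if lena is not None else 1,
            int(startb), int(lenb) if lenb is not None else 1)

assert parserange('@@ -10,3 +12,4 @@ def foo():\n') == (10, 3, 12, 4)
assert parserange('@@ -1 +1 @@\n') == (1, 1, 1, 1)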
1269 def read_context_hunk(self, lr):
1269 def read_context_hunk(self, lr):
1270 self.desc = lr.readline()
1270 self.desc = lr.readline()
1271 m = contextdesc.match(self.desc)
1271 m = contextdesc.match(self.desc)
1272 if not m:
1272 if not m:
1273 raise PatchError(_("bad hunk #%d") % self.number)
1273 raise PatchError(_("bad hunk #%d") % self.number)
1274 self.starta, aend = m.groups()
1274 self.starta, aend = m.groups()
1275 self.starta = int(self.starta)
1275 self.starta = int(self.starta)
1276 if aend is None:
1276 if aend is None:
1277 aend = self.starta
1277 aend = self.starta
1278 self.lena = int(aend) - self.starta
1278 self.lena = int(aend) - self.starta
1279 if self.starta:
1279 if self.starta:
1280 self.lena += 1
1280 self.lena += 1
1281 for x in xrange(self.lena):
1281 for x in xrange(self.lena):
1282 l = lr.readline()
1282 l = lr.readline()
1283 if l.startswith('---'):
1283 if l.startswith('---'):
1284 # lines addition, old block is empty
1284 # lines addition, old block is empty
1285 lr.push(l)
1285 lr.push(l)
1286 break
1286 break
1287 s = l[2:]
1287 s = l[2:]
1288 if l.startswith('- ') or l.startswith('! '):
1288 if l.startswith('- ') or l.startswith('! '):
1289 u = '-' + s
1289 u = '-' + s
1290 elif l.startswith(' '):
1290 elif l.startswith(' '):
1291 u = ' ' + s
1291 u = ' ' + s
1292 else:
1292 else:
1293 raise PatchError(_("bad hunk #%d old text line %d") %
1293 raise PatchError(_("bad hunk #%d old text line %d") %
1294 (self.number, x))
1294 (self.number, x))
1295 self.a.append(u)
1295 self.a.append(u)
1296 self.hunk.append(u)
1296 self.hunk.append(u)
1297
1297
1298 l = lr.readline()
1298 l = lr.readline()
1299 if l.startswith('\ '):
1299 if l.startswith('\ '):
1300 s = self.a[-1][:-1]
1300 s = self.a[-1][:-1]
1301 self.a[-1] = s
1301 self.a[-1] = s
1302 self.hunk[-1] = s
1302 self.hunk[-1] = s
1303 l = lr.readline()
1303 l = lr.readline()
1304 m = contextdesc.match(l)
1304 m = contextdesc.match(l)
1305 if not m:
1305 if not m:
1306 raise PatchError(_("bad hunk #%d") % self.number)
1306 raise PatchError(_("bad hunk #%d") % self.number)
1307 self.startb, bend = m.groups()
1307 self.startb, bend = m.groups()
1308 self.startb = int(self.startb)
1308 self.startb = int(self.startb)
1309 if bend is None:
1309 if bend is None:
1310 bend = self.startb
1310 bend = self.startb
1311 self.lenb = int(bend) - self.startb
1311 self.lenb = int(bend) - self.startb
1312 if self.startb:
1312 if self.startb:
1313 self.lenb += 1
1313 self.lenb += 1
1314 hunki = 1
1314 hunki = 1
1315 for x in xrange(self.lenb):
1315 for x in xrange(self.lenb):
1316 l = lr.readline()
1316 l = lr.readline()
1317 if l.startswith('\ '):
1317 if l.startswith('\ '):
1318 # XXX: the only way to hit this is with an invalid line range.
1318 # XXX: the only way to hit this is with an invalid line range.
1319 # The no-eol marker is not counted in the line range, but some
1319 # The no-eol marker is not counted in the line range, but some
1320 # diff(1) implementations out there may behave differently.
1320 # diff(1) implementations out there may behave differently.
1321 s = self.b[-1][:-1]
1321 s = self.b[-1][:-1]
1322 self.b[-1] = s
1322 self.b[-1] = s
1323 self.hunk[hunki - 1] = s
1323 self.hunk[hunki - 1] = s
1324 continue
1324 continue
1325 if not l:
1325 if not l:
1326 # line deletions, new block is empty and we hit EOF
1326 # line deletions, new block is empty and we hit EOF
1327 lr.push(l)
1327 lr.push(l)
1328 break
1328 break
1329 s = l[2:]
1329 s = l[2:]
1330 if l.startswith('+ ') or l.startswith('! '):
1330 if l.startswith('+ ') or l.startswith('! '):
1331 u = '+' + s
1331 u = '+' + s
1332 elif l.startswith(' '):
1332 elif l.startswith(' '):
1333 u = ' ' + s
1333 u = ' ' + s
1334 elif len(self.b) == 0:
1334 elif len(self.b) == 0:
1335 # line deletions, new block is empty
1335 # line deletions, new block is empty
1336 lr.push(l)
1336 lr.push(l)
1337 break
1337 break
1338 else:
1338 else:
1339 raise PatchError(_("bad hunk #%d old text line %d") %
1339 raise PatchError(_("bad hunk #%d old text line %d") %
1340 (self.number, x))
1340 (self.number, x))
1341 self.b.append(s)
1341 self.b.append(s)
1342 while True:
1342 while True:
1343 if hunki >= len(self.hunk):
1343 if hunki >= len(self.hunk):
1344 h = ""
1344 h = ""
1345 else:
1345 else:
1346 h = self.hunk[hunki]
1346 h = self.hunk[hunki]
1347 hunki += 1
1347 hunki += 1
1348 if h == u:
1348 if h == u:
1349 break
1349 break
1350 elif h.startswith('-'):
1350 elif h.startswith('-'):
1351 continue
1351 continue
1352 else:
1352 else:
1353 self.hunk.insert(hunki - 1, u)
1353 self.hunk.insert(hunki - 1, u)
1354 break
1354 break
1355
1355
1356 if not self.a:
1356 if not self.a:
1357 # this happens when lines were only added to the hunk
1357 # this happens when lines were only added to the hunk
1358 for x in self.hunk:
1358 for x in self.hunk:
1359 if x.startswith('-') or x.startswith(' '):
1359 if x.startswith('-') or x.startswith(' '):
1360 self.a.append(x)
1360 self.a.append(x)
1361 if not self.b:
1361 if not self.b:
1362 # this happens when lines were only deleted from the hunk
1362 # this happens when lines were only deleted from the hunk
1363 for x in self.hunk:
1363 for x in self.hunk:
1364 if x.startswith('+') or x.startswith(' '):
1364 if x.startswith('+') or x.startswith(' '):
1365 self.b.append(x[1:])
1365 self.b.append(x[1:])
1366 # @@ -start,len +start,len @@
1366 # @@ -start,len +start,len @@
1367 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1367 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1368 self.startb, self.lenb)
1368 self.startb, self.lenb)
1369 self.hunk[0] = self.desc
1369 self.hunk[0] = self.desc
1370 self._fixnewline(lr)
1370 self._fixnewline(lr)
1371
1371
1372 def _fixnewline(self, lr):
1372 def _fixnewline(self, lr):
1373 l = lr.readline()
1373 l = lr.readline()
1374 if l.startswith('\ '):
1374 if l.startswith('\ '):
1375 diffhelpers.fixnewline(self.hunk, self.a, self.b)
1375 diffhelpers.fixnewline(self.hunk, self.a, self.b)
1376 else:
1376 else:
1377 lr.push(l)
1377 lr.push(l)
1378
1378
1379 def complete(self):
1379 def complete(self):
1380 return len(self.a) == self.lena and len(self.b) == self.lenb
1380 return len(self.a) == self.lena and len(self.b) == self.lenb
1381
1381
1382 def _fuzzit(self, old, new, fuzz, toponly):
1382 def _fuzzit(self, old, new, fuzz, toponly):
1383 # this removes context lines from the top and bottom of the old and new
1383 # this removes context lines from the top and bottom of the old and new
1384 # lists. It checks the hunk to make sure only context lines are removed,
1384 # lists. It checks the hunk to make sure only context lines are removed,
1385 # and then returns the shortened lists of lines.
1385 # and then returns the shortened lists of lines.
1386 fuzz = min(fuzz, len(old))
1386 fuzz = min(fuzz, len(old))
1387 if fuzz:
1387 if fuzz:
1388 top = 0
1388 top = 0
1389 bot = 0
1389 bot = 0
1390 hlen = len(self.hunk)
1390 hlen = len(self.hunk)
1391 for x in xrange(hlen - 1):
1391 for x in xrange(hlen - 1):
1392 # the hunk starts with the @@ line, so use x+1
1392 # the hunk starts with the @@ line, so use x+1
1393 if self.hunk[x + 1].startswith(' '):
1393 if self.hunk[x + 1].startswith(' '):
1394 top += 1
1394 top += 1
1395 else:
1395 else:
1396 break
1396 break
1397 if not toponly:
1397 if not toponly:
1398 for x in xrange(hlen - 1):
1398 for x in xrange(hlen - 1):
1399 if self.hunk[hlen - bot - 1].startswith(' '):
1399 if self.hunk[hlen - bot - 1].startswith(' '):
1400 bot += 1
1400 bot += 1
1401 else:
1401 else:
1402 break
1402 break
1403
1403
1404 bot = min(fuzz, bot)
1404 bot = min(fuzz, bot)
1405 top = min(fuzz, top)
1405 top = min(fuzz, top)
1406 return old[top:len(old) - bot], new[top:len(new) - bot], top
1406 return old[top:len(old) - bot], new[top:len(new) - bot], top
1407 return old, new, 0
1407 return old, new, 0
1408
1408
1409 def fuzzit(self, fuzz, toponly):
1409 def fuzzit(self, fuzz, toponly):
1410 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1410 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1411 oldstart = self.starta + top
1411 oldstart = self.starta + top
1412 newstart = self.startb + top
1412 newstart = self.startb + top
1413 # zero length hunk ranges already have their start decremented
1413 # zero length hunk ranges already have their start decremented
1414 if self.lena and oldstart > 0:
1414 if self.lena and oldstart > 0:
1415 oldstart -= 1
1415 oldstart -= 1
1416 if self.lenb and newstart > 0:
1416 if self.lenb and newstart > 0:
1417 newstart -= 1
1417 newstart -= 1
1418 return old, oldstart, new, newstart
1418 return old, oldstart, new, newstart
1419
1419
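Fuzzing sacrifices up to `fuzz` context lines from the top (and, unless toponly is set, the bottom) of the hunk, and moves the start offsets forward by however many top lines were dropped. A standalone sketch over made-up lines, taking the hunk body without its leading '@@' line; 'old' keeps the control char (like self.a) while 'new' does not (like self.b):

def fuzz_trim(hunklines, old, new, fuzz, toponly=False):
    # count leading/trailing context (' ') lines that may be sacrificed
    top = 0
    for line in hunklines:
        if line.startswith(' '):
            top += 1
        else:
            break
    bot = 0
    if not toponly:
        for line in reversed(hunklines):
            if line.startswith(' '):
                bot += 1
            else:
                break
    top, bot = min(fuzz, top), min(fuzz, bot)
    return old[top:len(old) - bot], new[top:len(new) - bot], top

old = [' ctx1\n', '-old\n', ' ctx2\n']
new = ['ctx1\n', 'new\n', 'ctx2\n']
hunk = [' ctx1\n', '-old\n', '+new\n', ' ctx2\n']
assert fuzz_trim(hunk, old, new, 1) == (['-old\n'], ['new\n'], 1)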
1420 class binhunk(object):
1420 class binhunk(object):
1421 'A binary patch file.'
1421 'A binary patch file.'
1422 def __init__(self, lr, fname):
1422 def __init__(self, lr, fname):
1423 self.text = None
1423 self.text = None
1424 self.delta = False
1424 self.delta = False
1425 self.hunk = ['GIT binary patch\n']
1425 self.hunk = ['GIT binary patch\n']
1426 self._fname = fname
1426 self._fname = fname
1427 self._read(lr)
1427 self._read(lr)
1428
1428
1429 def complete(self):
1429 def complete(self):
1430 return self.text is not None
1430 return self.text is not None
1431
1431
1432 def new(self, lines):
1432 def new(self, lines):
1433 if self.delta:
1433 if self.delta:
1434 return [applybindelta(self.text, ''.join(lines))]
1434 return [applybindelta(self.text, ''.join(lines))]
1435 return [self.text]
1435 return [self.text]
1436
1436
1437 def _read(self, lr):
1437 def _read(self, lr):
1438 def getline(lr, hunk):
1438 def getline(lr, hunk):
1439 l = lr.readline()
1439 l = lr.readline()
1440 hunk.append(l)
1440 hunk.append(l)
1441 return l.rstrip('\r\n')
1441 return l.rstrip('\r\n')
1442
1442
1443 size = 0
1443 size = 0
1444 while True:
1444 while True:
1445 line = getline(lr, self.hunk)
1445 line = getline(lr, self.hunk)
1446 if not line:
1446 if not line:
1447 raise PatchError(_('could not extract "%s" binary data')
1447 raise PatchError(_('could not extract "%s" binary data')
1448 % self._fname)
1448 % self._fname)
1449 if line.startswith('literal '):
1449 if line.startswith('literal '):
1450 size = int(line[8:].rstrip())
1450 size = int(line[8:].rstrip())
1451 break
1451 break
1452 if line.startswith('delta '):
1452 if line.startswith('delta '):
1453 size = int(line[6:].rstrip())
1453 size = int(line[6:].rstrip())
1454 self.delta = True
1454 self.delta = True
1455 break
1455 break
1456 dec = []
1456 dec = []
1457 line = getline(lr, self.hunk)
1457 line = getline(lr, self.hunk)
1458 while len(line) > 1:
1458 while len(line) > 1:
1459 l = line[0:1]
1459 l = line[0:1]
1460 if l <= 'Z' and l >= 'A':
1460 if l <= 'Z' and l >= 'A':
1461 l = ord(l) - ord('A') + 1
1461 l = ord(l) - ord('A') + 1
1462 else:
1462 else:
1463 l = ord(l) - ord('a') + 27
1463 l = ord(l) - ord('a') + 27
1464 try:
1464 try:
1465 dec.append(util.b85decode(line[1:])[:l])
1465 dec.append(util.b85decode(line[1:])[:l])
1466 except ValueError as e:
1466 except ValueError as e:
1467 raise PatchError(_('could not decode "%s" binary patch: %s')
1467 raise PatchError(_('could not decode "%s" binary patch: %s')
1468 % (self._fname, stringutil.forcebytestr(e)))
1468 % (self._fname, stringutil.forcebytestr(e)))
1469 line = getline(lr, self.hunk)
1469 line = getline(lr, self.hunk)
1470 text = zlib.decompress(''.join(dec))
1470 text = zlib.decompress(''.join(dec))
1471 if len(text) != size:
1471 if len(text) != size:
1472 raise PatchError(_('"%s" length is %d bytes, should be %d')
1472 raise PatchError(_('"%s" length is %d bytes, should be %d')
1473 % (self._fname, len(text), size))
1473 % (self._fname, len(text), size))
1474 self.text = text
1474 self.text = text
1475
1475
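A 'literal' binary hunk stores zlib-compressed data, base85-encoded in lines whose first character encodes the decoded byte count ('A'-'Z' for 1-26, 'a'-'z' for 27-52). The round trip below is a sketch using Python's base64.b85 codec, which uses the same alphabet as git-style binary diffs; Mercurial itself decodes with util.b85decode as shown above.

import base64
import zlib

def encodeline(chunk):
    # git pads each group to a multiple of 4 bytes before base85-encoding
    assert 1 <= len(chunk) <= 52
    lenchar = chr(ord('A') + len(chunk) - 1) if len(chunk) <= 26 \
        else chr(ord('a') + len(chunk) - 27)
    padded = chunk + b'\0' * (-len(chunk) % 4)
    return lenchar.encode() + base64.b85encode(padded) + b'\n'

def decodeline(line):
    l = line[0:1]
    n = (ord(l) - ord('A') + 1) if b'A' <= l <= b'Z' else (ord(l) - ord('a') + 27)
    return base64.b85decode(line[1:].rstrip(b'\r\n'))[:n]

payload = zlib.compress(b'hello, binary world\n')
lines = [encodeline(payload[i:i + 52]) for i in range(0, len(payload), 52)]
decoded = b''.join(decodeline(l) for l in lines)
assert zlib.decompress(decoded) == b'hello, binary world\n'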
1476 def parsefilename(str):
1476 def parsefilename(str):
1477 # --- filename \t|space stuff
1477 # --- filename \t|space stuff
1478 s = str[4:].rstrip('\r\n')
1478 s = str[4:].rstrip('\r\n')
1479 i = s.find('\t')
1479 i = s.find('\t')
1480 if i < 0:
1480 if i < 0:
1481 i = s.find(' ')
1481 i = s.find(' ')
1482 if i < 0:
1482 if i < 0:
1483 return s
1483 return s
1484 return s[:i]
1484 return s[:i]
1485
1485
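parsefilename() simply drops the '--- '/'+++ ' prefix and anything after the first tab or space (usually a timestamp). Its behaviour on typical header lines, shown against a standalone copy of the same logic:

def parse_filename(line):
    # same rule as parsefilename() above, restated for a quick check
    s = line[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
    return s if i < 0 else s[:i]

assert parse_filename('--- a/foo/bar.c\t2018-04-01 12:00:00\n') == 'a/foo/bar.c'
assert parse_filename('+++ b/foo/bar.c\n') == 'b/foo/bar.c'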
1486 def reversehunks(hunks):
1486 def reversehunks(hunks):
1487 '''reverse the signs in the hunks given as argument
1487 '''reverse the signs in the hunks given as argument
1488
1488
1489 This function operates on hunks coming out of patch.filterpatch, that is
1489 This function operates on hunks coming out of patch.filterpatch, that is
1490 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1490 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1491
1491
1492 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1492 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1493 ... --- a/folder1/g
1493 ... --- a/folder1/g
1494 ... +++ b/folder1/g
1494 ... +++ b/folder1/g
1495 ... @@ -1,7 +1,7 @@
1495 ... @@ -1,7 +1,7 @@
1496 ... +firstline
1496 ... +firstline
1497 ... c
1497 ... c
1498 ... 1
1498 ... 1
1499 ... 2
1499 ... 2
1500 ... + 3
1500 ... + 3
1501 ... -4
1501 ... -4
1502 ... 5
1502 ... 5
1503 ... d
1503 ... d
1504 ... +lastline"""
1504 ... +lastline"""
1505 >>> hunks = parsepatch([rawpatch])
1505 >>> hunks = parsepatch([rawpatch])
1506 >>> hunkscomingfromfilterpatch = []
1506 >>> hunkscomingfromfilterpatch = []
1507 >>> for h in hunks:
1507 >>> for h in hunks:
1508 ... hunkscomingfromfilterpatch.append(h)
1508 ... hunkscomingfromfilterpatch.append(h)
1509 ... hunkscomingfromfilterpatch.extend(h.hunks)
1509 ... hunkscomingfromfilterpatch.extend(h.hunks)
1510
1510
1511 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1511 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1512 >>> from . import util
1512 >>> from . import util
1513 >>> fp = util.stringio()
1513 >>> fp = util.stringio()
1514 >>> for c in reversedhunks:
1514 >>> for c in reversedhunks:
1515 ... c.write(fp)
1515 ... c.write(fp)
1516 >>> fp.seek(0) or None
1516 >>> fp.seek(0) or None
1517 >>> reversedpatch = fp.read()
1517 >>> reversedpatch = fp.read()
1518 >>> print(pycompat.sysstr(reversedpatch))
1518 >>> print(pycompat.sysstr(reversedpatch))
1519 diff --git a/folder1/g b/folder1/g
1519 diff --git a/folder1/g b/folder1/g
1520 --- a/folder1/g
1520 --- a/folder1/g
1521 +++ b/folder1/g
1521 +++ b/folder1/g
1522 @@ -1,4 +1,3 @@
1522 @@ -1,4 +1,3 @@
1523 -firstline
1523 -firstline
1524 c
1524 c
1525 1
1525 1
1526 2
1526 2
1527 @@ -2,6 +1,6 @@
1527 @@ -2,6 +1,6 @@
1528 c
1528 c
1529 1
1529 1
1530 2
1530 2
1531 - 3
1531 - 3
1532 +4
1532 +4
1533 5
1533 5
1534 d
1534 d
1535 @@ -6,3 +5,2 @@
1535 @@ -6,3 +5,2 @@
1536 5
1536 5
1537 d
1537 d
1538 -lastline
1538 -lastline
1539
1539
1540 '''
1540 '''
1541
1541
1542 newhunks = []
1542 newhunks = []
1543 for c in hunks:
1543 for c in hunks:
1544 if util.safehasattr(c, 'reversehunk'):
1544 if util.safehasattr(c, 'reversehunk'):
1545 c = c.reversehunk()
1545 c = c.reversehunk()
1546 newhunks.append(c)
1546 newhunks.append(c)
1547 return newhunks
1547 return newhunks
1548
1548
1549 def parsepatch(originalchunks, maxcontext=None):
1549 def parsepatch(originalchunks, maxcontext=None):
1550 """patch -> [] of headers -> [] of hunks
1550 """patch -> [] of headers -> [] of hunks
1551
1551
1552 If maxcontext is not None, trim context lines if necessary.
1552 If maxcontext is not None, trim context lines if necessary.
1553
1553
1554 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1554 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1555 ... --- a/folder1/g
1555 ... --- a/folder1/g
1556 ... +++ b/folder1/g
1556 ... +++ b/folder1/g
1557 ... @@ -1,8 +1,10 @@
1557 ... @@ -1,8 +1,10 @@
1558 ... 1
1558 ... 1
1559 ... 2
1559 ... 2
1560 ... -3
1560 ... -3
1561 ... 4
1561 ... 4
1562 ... 5
1562 ... 5
1563 ... 6
1563 ... 6
1564 ... +6.1
1564 ... +6.1
1565 ... +6.2
1565 ... +6.2
1566 ... 7
1566 ... 7
1567 ... 8
1567 ... 8
1568 ... +9'''
1568 ... +9'''
1569 >>> out = util.stringio()
1569 >>> out = util.stringio()
1570 >>> headers = parsepatch([rawpatch], maxcontext=1)
1570 >>> headers = parsepatch([rawpatch], maxcontext=1)
1571 >>> for header in headers:
1571 >>> for header in headers:
1572 ... header.write(out)
1572 ... header.write(out)
1573 ... for hunk in header.hunks:
1573 ... for hunk in header.hunks:
1574 ... hunk.write(out)
1574 ... hunk.write(out)
1575 >>> print(pycompat.sysstr(out.getvalue()))
1575 >>> print(pycompat.sysstr(out.getvalue()))
1576 diff --git a/folder1/g b/folder1/g
1576 diff --git a/folder1/g b/folder1/g
1577 --- a/folder1/g
1577 --- a/folder1/g
1578 +++ b/folder1/g
1578 +++ b/folder1/g
1579 @@ -2,3 +2,2 @@
1579 @@ -2,3 +2,2 @@
1580 2
1580 2
1581 -3
1581 -3
1582 4
1582 4
1583 @@ -6,2 +5,4 @@
1583 @@ -6,2 +5,4 @@
1584 6
1584 6
1585 +6.1
1585 +6.1
1586 +6.2
1586 +6.2
1587 7
1587 7
1588 @@ -8,1 +9,2 @@
1588 @@ -8,1 +9,2 @@
1589 8
1589 8
1590 +9
1590 +9
1591 """
1591 """
1592 class parser(object):
1592 class parser(object):
1593 """patch parsing state machine"""
1593 """patch parsing state machine"""
1594 def __init__(self):
1594 def __init__(self):
1595 self.fromline = 0
1595 self.fromline = 0
1596 self.toline = 0
1596 self.toline = 0
1597 self.proc = ''
1597 self.proc = ''
1598 self.header = None
1598 self.header = None
1599 self.context = []
1599 self.context = []
1600 self.before = []
1600 self.before = []
1601 self.hunk = []
1601 self.hunk = []
1602 self.headers = []
1602 self.headers = []
1603
1603
1604 def addrange(self, limits):
1604 def addrange(self, limits):
1605 fromstart, fromend, tostart, toend, proc = limits
1605 fromstart, fromend, tostart, toend, proc = limits
1606 self.fromline = int(fromstart)
1606 self.fromline = int(fromstart)
1607 self.toline = int(tostart)
1607 self.toline = int(tostart)
1608 self.proc = proc
1608 self.proc = proc
1609
1609
1610 def addcontext(self, context):
1610 def addcontext(self, context):
1611 if self.hunk:
1611 if self.hunk:
1612 h = recordhunk(self.header, self.fromline, self.toline,
1612 h = recordhunk(self.header, self.fromline, self.toline,
1613 self.proc, self.before, self.hunk, context, maxcontext)
1613 self.proc, self.before, self.hunk, context, maxcontext)
1614 self.header.hunks.append(h)
1614 self.header.hunks.append(h)
1615 self.fromline += len(self.before) + h.removed
1615 self.fromline += len(self.before) + h.removed
1616 self.toline += len(self.before) + h.added
1616 self.toline += len(self.before) + h.added
1617 self.before = []
1617 self.before = []
1618 self.hunk = []
1618 self.hunk = []
1619 self.context = context
1619 self.context = context
1620
1620
1621 def addhunk(self, hunk):
1621 def addhunk(self, hunk):
1622 if self.context:
1622 if self.context:
1623 self.before = self.context
1623 self.before = self.context
1624 self.context = []
1624 self.context = []
1625 self.hunk = hunk
1625 self.hunk = hunk
1626
1626
1627 def newfile(self, hdr):
1627 def newfile(self, hdr):
1628 self.addcontext([])
1628 self.addcontext([])
1629 h = header(hdr)
1629 h = header(hdr)
1630 self.headers.append(h)
1630 self.headers.append(h)
1631 self.header = h
1631 self.header = h
1632
1632
1633 def addother(self, line):
1633 def addother(self, line):
1634 pass # 'other' lines are ignored
1634 pass # 'other' lines are ignored
1635
1635
1636 def finished(self):
1636 def finished(self):
1637 self.addcontext([])
1637 self.addcontext([])
1638 return self.headers
1638 return self.headers
1639
1639
1640 transitions = {
1640 transitions = {
1641 'file': {'context': addcontext,
1641 'file': {'context': addcontext,
1642 'file': newfile,
1642 'file': newfile,
1643 'hunk': addhunk,
1643 'hunk': addhunk,
1644 'range': addrange},
1644 'range': addrange},
1645 'context': {'file': newfile,
1645 'context': {'file': newfile,
1646 'hunk': addhunk,
1646 'hunk': addhunk,
1647 'range': addrange,
1647 'range': addrange,
1648 'other': addother},
1648 'other': addother},
1649 'hunk': {'context': addcontext,
1649 'hunk': {'context': addcontext,
1650 'file': newfile,
1650 'file': newfile,
1651 'range': addrange},
1651 'range': addrange},
1652 'range': {'context': addcontext,
1652 'range': {'context': addcontext,
1653 'hunk': addhunk},
1653 'hunk': addhunk},
1654 'other': {'other': addother},
1654 'other': {'other': addother},
1655 }
1655 }
1656
1656
1657 p = parser()
1657 p = parser()
1658 fp = stringio()
1658 fp = stringio()
1659 fp.write(''.join(originalchunks))
1659 fp.write(''.join(originalchunks))
1660 fp.seek(0)
1660 fp.seek(0)
1661
1661
1662 state = 'context'
1662 state = 'context'
1663 for newstate, data in scanpatch(fp):
1663 for newstate, data in scanpatch(fp):
1664 try:
1664 try:
1665 p.transitions[state][newstate](p, data)
1665 p.transitions[state][newstate](p, data)
1666 except KeyError:
1666 except KeyError:
1667 raise PatchError('unhandled transition: %s -> %s' %
1667 raise PatchError('unhandled transition: %s -> %s' %
1668 (state, newstate))
1668 (state, newstate))
1669 state = newstate
1669 state = newstate
1670 del fp
1670 del fp
1671 return p.finished()
1671 return p.finished()
1672
1672
1673 def pathtransform(path, strip, prefix):
1673 def pathtransform(path, strip, prefix):
1674 '''turn a path from a patch into a path suitable for the repository
1674 '''turn a path from a patch into a path suitable for the repository
1675
1675
1676 prefix, if not empty, is expected to be normalized with a / at the end.
1676 prefix, if not empty, is expected to be normalized with a / at the end.
1677
1677
1678 Returns (stripped components, path in repository).
1678 Returns (stripped components, path in repository).
1679
1679
1680 >>> pathtransform(b'a/b/c', 0, b'')
1680 >>> pathtransform(b'a/b/c', 0, b'')
1681 ('', 'a/b/c')
1681 ('', 'a/b/c')
1682 >>> pathtransform(b' a/b/c ', 0, b'')
1682 >>> pathtransform(b' a/b/c ', 0, b'')
1683 ('', ' a/b/c')
1683 ('', ' a/b/c')
1684 >>> pathtransform(b' a/b/c ', 2, b'')
1684 >>> pathtransform(b' a/b/c ', 2, b'')
1685 ('a/b/', 'c')
1685 ('a/b/', 'c')
1686 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1686 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1687 ('', 'd/e/a/b/c')
1687 ('', 'd/e/a/b/c')
1688 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1688 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1689 ('a//b/', 'd/e/c')
1689 ('a//b/', 'd/e/c')
1690 >>> pathtransform(b'a/b/c', 3, b'')
1690 >>> pathtransform(b'a/b/c', 3, b'')
1691 Traceback (most recent call last):
1691 Traceback (most recent call last):
1692 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1692 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1693 '''
1693 '''
1694 pathlen = len(path)
1694 pathlen = len(path)
1695 i = 0
1695 i = 0
1696 if strip == 0:
1696 if strip == 0:
1697 return '', prefix + path.rstrip()
1697 return '', prefix + path.rstrip()
1698 count = strip
1698 count = strip
1699 while count > 0:
1699 while count > 0:
1700 i = path.find('/', i)
1700 i = path.find('/', i)
1701 if i == -1:
1701 if i == -1:
1702 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1702 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1703 (count, strip, path))
1703 (count, strip, path))
1704 i += 1
1704 i += 1
1705 # consume '//' in the path
1705 # consume '//' in the path
1706 while i < pathlen - 1 and path[i:i + 1] == '/':
1706 while i < pathlen - 1 and path[i:i + 1] == '/':
1707 i += 1
1707 i += 1
1708 count -= 1
1708 count -= 1
1709 return path[:i].lstrip(), prefix + path[i:].rstrip()
1709 return path[:i].lstrip(), prefix + path[i:].rstrip()
1710
1710
1711 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1711 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1712 nulla = afile_orig == "/dev/null"
1712 nulla = afile_orig == "/dev/null"
1713 nullb = bfile_orig == "/dev/null"
1713 nullb = bfile_orig == "/dev/null"
1714 create = nulla and hunk.starta == 0 and hunk.lena == 0
1714 create = nulla and hunk.starta == 0 and hunk.lena == 0
1715 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1715 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1716 abase, afile = pathtransform(afile_orig, strip, prefix)
1716 abase, afile = pathtransform(afile_orig, strip, prefix)
1717 gooda = not nulla and backend.exists(afile)
1717 gooda = not nulla and backend.exists(afile)
1718 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1718 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1719 if afile == bfile:
1719 if afile == bfile:
1720 goodb = gooda
1720 goodb = gooda
1721 else:
1721 else:
1722 goodb = not nullb and backend.exists(bfile)
1722 goodb = not nullb and backend.exists(bfile)
1723 missing = not goodb and not gooda and not create
1723 missing = not goodb and not gooda and not create
1724
1724
1725 # some diff programs apparently produce patches where the afile is
1725 # some diff programs apparently produce patches where the afile is
1726 # not /dev/null, but afile starts with bfile
1726 # not /dev/null, but afile starts with bfile
1727 abasedir = afile[:afile.rfind('/') + 1]
1727 abasedir = afile[:afile.rfind('/') + 1]
1728 bbasedir = bfile[:bfile.rfind('/') + 1]
1728 bbasedir = bfile[:bfile.rfind('/') + 1]
1729 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1729 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1730 and hunk.starta == 0 and hunk.lena == 0):
1730 and hunk.starta == 0 and hunk.lena == 0):
1731 create = True
1731 create = True
1732 missing = False
1732 missing = False
1733
1733
1734 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1734 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1735 # diff is between a file and its backup. In this case, the original
1735 # diff is between a file and its backup. In this case, the original
1736 # file should be patched (see original mpatch code).
1736 # file should be patched (see original mpatch code).
1737 isbackup = (abase == bbase and bfile.startswith(afile))
1737 isbackup = (abase == bbase and bfile.startswith(afile))
1738 fname = None
1738 fname = None
1739 if not missing:
1739 if not missing:
1740 if gooda and goodb:
1740 if gooda and goodb:
1741 if isbackup:
1741 if isbackup:
1742 fname = afile
1742 fname = afile
1743 else:
1743 else:
1744 fname = bfile
1744 fname = bfile
1745 elif gooda:
1745 elif gooda:
1746 fname = afile
1746 fname = afile
1747
1747
1748 if not fname:
1748 if not fname:
1749 if not nullb:
1749 if not nullb:
1750 if isbackup:
1750 if isbackup:
1751 fname = afile
1751 fname = afile
1752 else:
1752 else:
1753 fname = bfile
1753 fname = bfile
1754 elif not nulla:
1754 elif not nulla:
1755 fname = afile
1755 fname = afile
1756 else:
1756 else:
1757 raise PatchError(_("undefined source and destination files"))
1757 raise PatchError(_("undefined source and destination files"))
1758
1758
1759 gp = patchmeta(fname)
1759 gp = patchmeta(fname)
1760 if create:
1760 if create:
1761 gp.op = 'ADD'
1761 gp.op = 'ADD'
1762 elif remove:
1762 elif remove:
1763 gp.op = 'DELETE'
1763 gp.op = 'DELETE'
1764 return gp
1764 return gp
1765
1765
1766 def scanpatch(fp):
1766 def scanpatch(fp):
1767 """like patch.iterhunks, but yields different events
1767 """like patch.iterhunks, but yields different events
1768
1768
1769 - ('file', [header_lines + fromfile + tofile])
1769 - ('file', [header_lines + fromfile + tofile])
1770 - ('context', [context_lines])
1770 - ('context', [context_lines])
1771 - ('hunk', [hunk_lines])
1771 - ('hunk', [hunk_lines])
1772 - ('range', (-start,len, +start,len, proc))
1772 - ('range', (-start,len, +start,len, proc))
1773 """
1773 """
1774 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1774 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1775 lr = linereader(fp)
1775 lr = linereader(fp)
1776
1776
1777 def scanwhile(first, p):
1777 def scanwhile(first, p):
1778 """scan lr while predicate holds"""
1778 """scan lr while predicate holds"""
1779 lines = [first]
1779 lines = [first]
1780 for line in iter(lr.readline, ''):
1780 for line in iter(lr.readline, ''):
1781 if p(line):
1781 if p(line):
1782 lines.append(line)
1782 lines.append(line)
1783 else:
1783 else:
1784 lr.push(line)
1784 lr.push(line)
1785 break
1785 break
1786 return lines
1786 return lines
1787
1787
1788 for line in iter(lr.readline, ''):
1788 for line in iter(lr.readline, ''):
1789 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1789 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1790 def notheader(line):
1790 def notheader(line):
1791 s = line.split(None, 1)
1791 s = line.split(None, 1)
1792 return not s or s[0] not in ('---', 'diff')
1792 return not s or s[0] not in ('---', 'diff')
1793 header = scanwhile(line, notheader)
1793 header = scanwhile(line, notheader)
1794 fromfile = lr.readline()
1794 fromfile = lr.readline()
1795 if fromfile.startswith('---'):
1795 if fromfile.startswith('---'):
1796 tofile = lr.readline()
1796 tofile = lr.readline()
1797 header += [fromfile, tofile]
1797 header += [fromfile, tofile]
1798 else:
1798 else:
1799 lr.push(fromfile)
1799 lr.push(fromfile)
1800 yield 'file', header
1800 yield 'file', header
1801 elif line.startswith(' '):
1801 elif line.startswith(' '):
1802 cs = (' ', '\\')
1802 cs = (' ', '\\')
1803 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1803 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1804 elif line.startswith(('-', '+')):
1804 elif line.startswith(('-', '+')):
1805 cs = ('-', '+', '\\')
1805 cs = ('-', '+', '\\')
1806 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1806 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1807 else:
1807 else:
1808 m = lines_re.match(line)
1808 m = lines_re.match(line)
1809 if m:
1809 if m:
1810 yield 'range', m.groups()
1810 yield 'range', m.groups()
1811 else:
1811 else:
1812 yield 'other', line
1812 yield 'other', line
1813
1813
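These events feed the parser state machine in parsepatch() above. The classifier below is a simplified, per-line illustration of the event kinds; the real generator additionally groups consecutive lines into one event and folds the '---'/'+++' lines into the 'file' event.

import re

range_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')

def classifyline(line):
    # simplified sketch of scanpatch()'s event kinds, one line at a time
    if line.startswith('diff --git a/') or line.startswith('diff -r '):
        return 'file'
    if line.startswith(' '):
        return 'context'
    if line.startswith(('-', '+', '\\')):
        return 'hunk'
    if range_re.match(line):
        return 'range'
    return 'other'

sample = [
    'diff --git a/folder1/g b/folder1/g\n',
    '@@ -1,2 +1,3 @@\n',
    ' context\n',
    '+added\n',
]
assert [classifyline(l) for l in sample] == ['file', 'range', 'context', 'hunk']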
1814 def scangitpatch(lr, firstline):
1814 def scangitpatch(lr, firstline):
1815 """
1815 """
1816 Git patches can emit:
1816 Git patches can emit:
1817 - rename a to b
1817 - rename a to b
1818 - change b
1818 - change b
1819 - copy a to c
1819 - copy a to c
1820 - change c
1820 - change c
1821
1821
1822 We cannot apply this sequence as-is: the renamed 'a' could not be
1822 We cannot apply this sequence as-is: the renamed 'a' could not be
1823 found, because it would have been renamed already. And we cannot copy
1823 found, because it would have been renamed already. And we cannot copy
1824 from 'b' instead because 'b' would have been changed already. So
1824 from 'b' instead because 'b' would have been changed already. So
1825 we scan the git patch for copy and rename commands so we can
1825 we scan the git patch for copy and rename commands so we can
1826 perform the copies ahead of time.
1826 perform the copies ahead of time.
1827 """
1827 """
1828 pos = 0
1828 pos = 0
1829 try:
1829 try:
1830 pos = lr.fp.tell()
1830 pos = lr.fp.tell()
1831 fp = lr.fp
1831 fp = lr.fp
1832 except IOError:
1832 except IOError:
1833 fp = stringio(lr.fp.read())
1833 fp = stringio(lr.fp.read())
1834 gitlr = linereader(fp)
1834 gitlr = linereader(fp)
1835 gitlr.push(firstline)
1835 gitlr.push(firstline)
1836 gitpatches = readgitpatch(gitlr)
1836 gitpatches = readgitpatch(gitlr)
1837 fp.seek(pos)
1837 fp.seek(pos)
1838 return gitpatches
1838 return gitpatches
1839
1839
1840 def iterhunks(fp):
1840 def iterhunks(fp):
1841 """Read a patch and yield the following events:
1841 """Read a patch and yield the following events:
1842 - ("file", afile, bfile, firsthunk): select a new target file.
1842 - ("file", afile, bfile, firsthunk): select a new target file.
1843 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1843 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1844 "file" event.
1844 "file" event.
1845 - ("git", gitchanges): current diff is in git format, gitchanges
1845 - ("git", gitchanges): current diff is in git format, gitchanges
1846 maps filenames to gitpatch records. Unique event.
1846 maps filenames to gitpatch records. Unique event.
1847 """
1847 """
1848 afile = ""
1848 afile = ""
1849 bfile = ""
1849 bfile = ""
1850 state = None
1850 state = None
1851 hunknum = 0
1851 hunknum = 0
1852 emitfile = newfile = False
1852 emitfile = newfile = False
1853 gitpatches = None
1853 gitpatches = None
1854
1854
1855 # our states
1855 # our states
1856 BFILE = 1
1856 BFILE = 1
1857 context = None
1857 context = None
1858 lr = linereader(fp)
1858 lr = linereader(fp)
1859
1859
1860 for x in iter(lr.readline, ''):
1860 for x in iter(lr.readline, ''):
1861 if state == BFILE and (
1861 if state == BFILE and (
1862 (not context and x.startswith('@'))
1862 (not context and x.startswith('@'))
1863 or (context is not False and x.startswith('***************'))
1863 or (context is not False and x.startswith('***************'))
1864 or x.startswith('GIT binary patch')):
1864 or x.startswith('GIT binary patch')):
1865 gp = None
1865 gp = None
1866 if (gitpatches and
1866 if (gitpatches and
1867 gitpatches[-1].ispatching(afile, bfile)):
1867 gitpatches[-1].ispatching(afile, bfile)):
1868 gp = gitpatches.pop()
1868 gp = gitpatches.pop()
1869 if x.startswith('GIT binary patch'):
1869 if x.startswith('GIT binary patch'):
1870 h = binhunk(lr, gp.path)
1870 h = binhunk(lr, gp.path)
1871 else:
1871 else:
1872 if context is None and x.startswith('***************'):
1872 if context is None and x.startswith('***************'):
1873 context = True
1873 context = True
1874 h = hunk(x, hunknum + 1, lr, context)
1874 h = hunk(x, hunknum + 1, lr, context)
1875 hunknum += 1
1875 hunknum += 1
1876 if emitfile:
1876 if emitfile:
1877 emitfile = False
1877 emitfile = False
1878 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1878 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1879 yield 'hunk', h
1879 yield 'hunk', h
1880 elif x.startswith('diff --git a/'):
1880 elif x.startswith('diff --git a/'):
1881 m = gitre.match(x.rstrip(' \r\n'))
1881 m = gitre.match(x.rstrip(' \r\n'))
1882 if not m:
1882 if not m:
1883 continue
1883 continue
1884 if gitpatches is None:
1884 if gitpatches is None:
1885 # scan whole input for git metadata
1885 # scan whole input for git metadata
1886 gitpatches = scangitpatch(lr, x)
1886 gitpatches = scangitpatch(lr, x)
1887 yield 'git', [g.copy() for g in gitpatches
1887 yield 'git', [g.copy() for g in gitpatches
1888 if g.op in ('COPY', 'RENAME')]
1888 if g.op in ('COPY', 'RENAME')]
1889 gitpatches.reverse()
1889 gitpatches.reverse()
1890 afile = 'a/' + m.group(1)
1890 afile = 'a/' + m.group(1)
1891 bfile = 'b/' + m.group(2)
1891 bfile = 'b/' + m.group(2)
1892 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1892 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1893 gp = gitpatches.pop()
1893 gp = gitpatches.pop()
1894 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1894 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1895 if not gitpatches:
1895 if not gitpatches:
1896 raise PatchError(_('failed to synchronize metadata for "%s"')
1896 raise PatchError(_('failed to synchronize metadata for "%s"')
1897 % afile[2:])
1897 % afile[2:])
1898 gp = gitpatches[-1]
1898 gp = gitpatches[-1]
1899 newfile = True
1899 newfile = True
1900 elif x.startswith('---'):
1900 elif x.startswith('---'):
1901 # check for a unified diff
1901 # check for a unified diff
1902 l2 = lr.readline()
1902 l2 = lr.readline()
1903 if not l2.startswith('+++'):
1903 if not l2.startswith('+++'):
1904 lr.push(l2)
1904 lr.push(l2)
1905 continue
1905 continue
1906 newfile = True
1906 newfile = True
1907 context = False
1907 context = False
1908 afile = parsefilename(x)
1908 afile = parsefilename(x)
1909 bfile = parsefilename(l2)
1909 bfile = parsefilename(l2)
1910 elif x.startswith('***'):
1910 elif x.startswith('***'):
1911 # check for a context diff
1911 # check for a context diff
1912 l2 = lr.readline()
1912 l2 = lr.readline()
1913 if not l2.startswith('---'):
1913 if not l2.startswith('---'):
1914 lr.push(l2)
1914 lr.push(l2)
1915 continue
1915 continue
1916 l3 = lr.readline()
1916 l3 = lr.readline()
1917 lr.push(l3)
1917 lr.push(l3)
1918 if not l3.startswith("***************"):
1918 if not l3.startswith("***************"):
1919 lr.push(l2)
1919 lr.push(l2)
1920 continue
1920 continue
1921 newfile = True
1921 newfile = True
1922 context = True
1922 context = True
1923 afile = parsefilename(x)
1923 afile = parsefilename(x)
1924 bfile = parsefilename(l2)
1924 bfile = parsefilename(l2)
1925
1925
1926 if newfile:
1926 if newfile:
1927 newfile = False
1927 newfile = False
1928 emitfile = True
1928 emitfile = True
1929 state = BFILE
1929 state = BFILE
1930 hunknum = 0
1930 hunknum = 0
1931
1931
1932 while gitpatches:
1932 while gitpatches:
1933 gp = gitpatches.pop()
1933 gp = gitpatches.pop()
1934 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1934 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1935
1935
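# Illustrative note (not part of the original source): for a plain unified
# diff touching a single file, the generator above produces an event stream
# roughly like
#
#   ('file', ('a/foo.txt', 'b/foo.txt', <first hunk>, None))
#   ('hunk', <hunk>)            # repeated once per hunk
#
# and, when git metadata is present, a leading ('git', [<patchmeta>, ...])
# event describing copies and renames, which _applydiff() below uses to
# pre-populate the file store.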
1936 def applybindelta(binchunk, data):
1936 def applybindelta(binchunk, data):
1937 """Apply a binary delta hunk
1937 """Apply a binary delta hunk
1938 The algorithm used is the algorithm from git's patch-delta.c
1938 The algorithm used is the algorithm from git's patch-delta.c
1939 """
1939 """
1940 def deltahead(binchunk):
1940 def deltahead(binchunk):
1941 i = 0
1941 i = 0
1942 for c in binchunk:
1942 for c in binchunk:
1943 i += 1
1943 i += 1
1944 if not (ord(c) & 0x80):
1944 if not (ord(c) & 0x80):
1945 return i
1945 return i
1946 return i
1946 return i
1947 out = ""
1947 out = ""
1948 s = deltahead(binchunk)
1948 s = deltahead(binchunk)
1949 binchunk = binchunk[s:]
1949 binchunk = binchunk[s:]
1950 s = deltahead(binchunk)
1950 s = deltahead(binchunk)
1951 binchunk = binchunk[s:]
1951 binchunk = binchunk[s:]
1952 i = 0
1952 i = 0
1953 while i < len(binchunk):
1953 while i < len(binchunk):
1954 cmd = ord(binchunk[i])
1954 cmd = ord(binchunk[i])
1955 i += 1
1955 i += 1
1956 if (cmd & 0x80):
1956 if (cmd & 0x80):
1957 offset = 0
1957 offset = 0
1958 size = 0
1958 size = 0
1959 if (cmd & 0x01):
1959 if (cmd & 0x01):
1960 offset = ord(binchunk[i])
1960 offset = ord(binchunk[i])
1961 i += 1
1961 i += 1
1962 if (cmd & 0x02):
1962 if (cmd & 0x02):
1963 offset |= ord(binchunk[i]) << 8
1963 offset |= ord(binchunk[i]) << 8
1964 i += 1
1964 i += 1
1965 if (cmd & 0x04):
1965 if (cmd & 0x04):
1966 offset |= ord(binchunk[i]) << 16
1966 offset |= ord(binchunk[i]) << 16
1967 i += 1
1967 i += 1
1968 if (cmd & 0x08):
1968 if (cmd & 0x08):
1969 offset |= ord(binchunk[i]) << 24
1969 offset |= ord(binchunk[i]) << 24
1970 i += 1
1970 i += 1
1971 if (cmd & 0x10):
1971 if (cmd & 0x10):
1972 size = ord(binchunk[i])
1972 size = ord(binchunk[i])
1973 i += 1
1973 i += 1
1974 if (cmd & 0x20):
1974 if (cmd & 0x20):
1975 size |= ord(binchunk[i]) << 8
1975 size |= ord(binchunk[i]) << 8
1976 i += 1
1976 i += 1
1977 if (cmd & 0x40):
1977 if (cmd & 0x40):
1978 size |= ord(binchunk[i]) << 16
1978 size |= ord(binchunk[i]) << 16
1979 i += 1
1979 i += 1
1980 if size == 0:
1980 if size == 0:
1981 size = 0x10000
1981 size = 0x10000
1982 offset_end = offset + size
1982 offset_end = offset + size
1983 out += data[offset:offset_end]
1983 out += data[offset:offset_end]
1984 elif cmd != 0:
1984 elif cmd != 0:
1985 offset_end = i + cmd
1985 offset_end = i + cmd
1986 out += binchunk[i:offset_end]
1986 out += binchunk[i:offset_end]
1987 i += cmd
1987 i += cmd
1988 else:
1988 else:
1989 raise PatchError(_('unexpected delta opcode 0'))
1989 raise PatchError(_('unexpected delta opcode 0'))
1990 return out
1990 return out
1991
1991
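# Worked example (illustrative, not from the original file): with
# data = 'hello world', the delta '\x0b\x05\x91\x06\x05' decodes as two
# size headers (source length 11, result length 5) followed by one copy
# opcode: 0x91 has bit 0x80 set (copy), bit 0x01 set so the next byte 0x06
# is the offset, and bit 0x10 set so the following byte 0x05 is the size,
# giving data[6:11] == 'world'. A command byte without the high bit, e.g.
# 0x03, instead copies that many literal bytes from the delta itself.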
1992 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1992 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1993 """Reads a patch from fp and tries to apply it.
1993 """Reads a patch from fp and tries to apply it.
1994
1994
1995 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1995 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1996 there was any fuzz.
1996 there was any fuzz.
1997
1997
1998 If 'eolmode' is 'strict', the patch content and patched file are
1998 If 'eolmode' is 'strict', the patch content and patched file are
1999 read in binary mode. Otherwise, line endings are ignored when
1999 read in binary mode. Otherwise, line endings are ignored when
2000 patching then normalized according to 'eolmode'.
2000 patching then normalized according to 'eolmode'.
2001 """
2001 """
2002 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2002 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2003 prefix=prefix, eolmode=eolmode)
2003 prefix=prefix, eolmode=eolmode)
2004
2004
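# Usage sketch (hypothetical caller; the backend/store setup mirrors
# patchbackend() further down):
#
#   with open(patchpath, 'rb') as fp:
#       ret = applydiff(ui, fp, backend, store, strip=1)
#   # ret == 0: applied cleanly, ret == 1: applied with fuzz,
#   # ret == -1: at least one hunk was rejected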
2005 def _canonprefix(repo, prefix):
2005 def _canonprefix(repo, prefix):
2006 if prefix:
2006 if prefix:
2007 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2007 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2008 if prefix != '':
2008 if prefix != '':
2009 prefix += '/'
2009 prefix += '/'
2010 return prefix
2010 return prefix
2011
2011
2012 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2012 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2013 eolmode='strict'):
2013 eolmode='strict'):
2014 prefix = _canonprefix(backend.repo, prefix)
2014 prefix = _canonprefix(backend.repo, prefix)
2015 def pstrip(p):
2015 def pstrip(p):
2016 return pathtransform(p, strip - 1, prefix)[1]
2016 return pathtransform(p, strip - 1, prefix)[1]
2017
2017
2018 rejects = 0
2018 rejects = 0
2019 err = 0
2019 err = 0
2020 current_file = None
2020 current_file = None
2021
2021
2022 for state, values in iterhunks(fp):
2022 for state, values in iterhunks(fp):
2023 if state == 'hunk':
2023 if state == 'hunk':
2024 if not current_file:
2024 if not current_file:
2025 continue
2025 continue
2026 ret = current_file.apply(values)
2026 ret = current_file.apply(values)
2027 if ret > 0:
2027 if ret > 0:
2028 err = 1
2028 err = 1
2029 elif state == 'file':
2029 elif state == 'file':
2030 if current_file:
2030 if current_file:
2031 rejects += current_file.close()
2031 rejects += current_file.close()
2032 current_file = None
2032 current_file = None
2033 afile, bfile, first_hunk, gp = values
2033 afile, bfile, first_hunk, gp = values
2034 if gp:
2034 if gp:
2035 gp.path = pstrip(gp.path)
2035 gp.path = pstrip(gp.path)
2036 if gp.oldpath:
2036 if gp.oldpath:
2037 gp.oldpath = pstrip(gp.oldpath)
2037 gp.oldpath = pstrip(gp.oldpath)
2038 else:
2038 else:
2039 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2039 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2040 prefix)
2040 prefix)
2041 if gp.op == 'RENAME':
2041 if gp.op == 'RENAME':
2042 backend.unlink(gp.oldpath)
2042 backend.unlink(gp.oldpath)
2043 if not first_hunk:
2043 if not first_hunk:
2044 if gp.op == 'DELETE':
2044 if gp.op == 'DELETE':
2045 backend.unlink(gp.path)
2045 backend.unlink(gp.path)
2046 continue
2046 continue
2047 data, mode = None, None
2047 data, mode = None, None
2048 if gp.op in ('RENAME', 'COPY'):
2048 if gp.op in ('RENAME', 'COPY'):
2049 data, mode = store.getfile(gp.oldpath)[:2]
2049 data, mode = store.getfile(gp.oldpath)[:2]
2050 if data is None:
2050 if data is None:
2051 # This means that the old path does not exist
2051 # This means that the old path does not exist
2052 raise PatchError(_("source file '%s' does not exist")
2052 raise PatchError(_("source file '%s' does not exist")
2053 % gp.oldpath)
2053 % gp.oldpath)
2054 if gp.mode:
2054 if gp.mode:
2055 mode = gp.mode
2055 mode = gp.mode
2056 if gp.op == 'ADD':
2056 if gp.op == 'ADD':
2057 # Added files without content have no hunk and
2057 # Added files without content have no hunk and
2058 # must be created
2058 # must be created
2059 data = ''
2059 data = ''
2060 if data or mode:
2060 if data or mode:
2061 if (gp.op in ('ADD', 'RENAME', 'COPY')
2061 if (gp.op in ('ADD', 'RENAME', 'COPY')
2062 and backend.exists(gp.path)):
2062 and backend.exists(gp.path)):
2063 raise PatchError(_("cannot create %s: destination "
2063 raise PatchError(_("cannot create %s: destination "
2064 "already exists") % gp.path)
2064 "already exists") % gp.path)
2065 backend.setfile(gp.path, data, mode, gp.oldpath)
2065 backend.setfile(gp.path, data, mode, gp.oldpath)
2066 continue
2066 continue
2067 try:
2067 try:
2068 current_file = patcher(ui, gp, backend, store,
2068 current_file = patcher(ui, gp, backend, store,
2069 eolmode=eolmode)
2069 eolmode=eolmode)
2070 except PatchError as inst:
2070 except PatchError as inst:
2071 ui.warn(str(inst) + '\n')
2071 ui.warn(str(inst) + '\n')
2072 current_file = None
2072 current_file = None
2073 rejects += 1
2073 rejects += 1
2074 continue
2074 continue
2075 elif state == 'git':
2075 elif state == 'git':
2076 for gp in values:
2076 for gp in values:
2077 path = pstrip(gp.oldpath)
2077 path = pstrip(gp.oldpath)
2078 data, mode = backend.getfile(path)
2078 data, mode = backend.getfile(path)
2079 if data is None:
2079 if data is None:
2080 # The error ignored here will trigger a getfile()
2080 # The error ignored here will trigger a getfile()
2081 # error in a place more appropriate for error
2081 # error in a place more appropriate for error
2082 # handling, and will not interrupt the patching
2082 # handling, and will not interrupt the patching
2083 # process.
2083 # process.
2084 pass
2084 pass
2085 else:
2085 else:
2086 store.setfile(path, data, mode)
2086 store.setfile(path, data, mode)
2087 else:
2087 else:
2088 raise error.Abort(_('unsupported parser state: %s') % state)
2088 raise error.Abort(_('unsupported parser state: %s') % state)
2089
2089
2090 if current_file:
2090 if current_file:
2091 rejects += current_file.close()
2091 rejects += current_file.close()
2092
2092
2093 if rejects:
2093 if rejects:
2094 return -1
2094 return -1
2095 return err
2095 return err
2096
2096
2097 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2097 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2098 similarity):
2098 similarity):
2099 """use <patcher> to apply <patchname> to the working directory.
2099 """use <patcher> to apply <patchname> to the working directory.
2100 returns whether patch was applied with fuzz factor."""
2100 returns whether patch was applied with fuzz factor."""
2101
2101
2102 fuzz = False
2102 fuzz = False
2103 args = []
2103 args = []
2104 cwd = repo.root
2104 cwd = repo.root
2105 if cwd:
2105 if cwd:
2106 args.append('-d %s' % procutil.shellquote(cwd))
2106 args.append('-d %s' % procutil.shellquote(cwd))
2107 cmd = ('%s %s -p%d < %s'
2107 cmd = ('%s %s -p%d < %s'
2108 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2108 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2109 fp = procutil.popen(cmd, 'rb')
2109 fp = procutil.popen(cmd, 'rb')
2110 try:
2110 try:
2111 for line in util.iterfile(fp):
2111 for line in util.iterfile(fp):
2112 line = line.rstrip()
2112 line = line.rstrip()
2113 ui.note(line + '\n')
2113 ui.note(line + '\n')
2114 if line.startswith('patching file '):
2114 if line.startswith('patching file '):
2115 pf = util.parsepatchoutput(line)
2115 pf = util.parsepatchoutput(line)
2116 printed_file = False
2116 printed_file = False
2117 files.add(pf)
2117 files.add(pf)
2118 elif line.find('with fuzz') >= 0:
2118 elif line.find('with fuzz') >= 0:
2119 fuzz = True
2119 fuzz = True
2120 if not printed_file:
2120 if not printed_file:
2121 ui.warn(pf + '\n')
2121 ui.warn(pf + '\n')
2122 printed_file = True
2122 printed_file = True
2123 ui.warn(line + '\n')
2123 ui.warn(line + '\n')
2124 elif line.find('saving rejects to file') >= 0:
2124 elif line.find('saving rejects to file') >= 0:
2125 ui.warn(line + '\n')
2125 ui.warn(line + '\n')
2126 elif line.find('FAILED') >= 0:
2126 elif line.find('FAILED') >= 0:
2127 if not printed_file:
2127 if not printed_file:
2128 ui.warn(pf + '\n')
2128 ui.warn(pf + '\n')
2129 printed_file = True
2129 printed_file = True
2130 ui.warn(line + '\n')
2130 ui.warn(line + '\n')
2131 finally:
2131 finally:
2132 if files:
2132 if files:
2133 scmutil.marktouched(repo, files, similarity)
2133 scmutil.marktouched(repo, files, similarity)
2134 code = fp.close()
2134 code = fp.close()
2135 if code:
2135 if code:
2136 raise PatchError(_("patch command failed: %s") %
2136 raise PatchError(_("patch command failed: %s") %
2137 procutil.explainexit(code))
2137 procutil.explainexit(code))
2138 return fuzz
2138 return fuzz
2139
2139
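# For illustration (values assumed): with patcher='patch', strip=1 and
# patchname='fix.diff', the command assembled above is roughly
#
#   patch -d '/path/to/repo' -p1 < 'fix.diff'
#
# and its output is scanned for "with fuzz", "saving rejects to file" and
# "FAILED" lines to report fuzz and rejects back to the user.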
2140 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2140 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2141 eolmode='strict'):
2141 eolmode='strict'):
2142 if files is None:
2142 if files is None:
2143 files = set()
2143 files = set()
2144 if eolmode is None:
2144 if eolmode is None:
2145 eolmode = ui.config('patch', 'eol')
2145 eolmode = ui.config('patch', 'eol')
2146 if eolmode.lower() not in eolmodes:
2146 if eolmode.lower() not in eolmodes:
2147 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2147 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2148 eolmode = eolmode.lower()
2148 eolmode = eolmode.lower()
2149
2149
2150 store = filestore()
2150 store = filestore()
2151 try:
2151 try:
2152 fp = open(patchobj, 'rb')
2152 fp = open(patchobj, 'rb')
2153 except TypeError:
2153 except TypeError:
2154 fp = patchobj
2154 fp = patchobj
2155 try:
2155 try:
2156 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2156 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2157 eolmode=eolmode)
2157 eolmode=eolmode)
2158 finally:
2158 finally:
2159 if fp != patchobj:
2159 if fp != patchobj:
2160 fp.close()
2160 fp.close()
2161 files.update(backend.close())
2161 files.update(backend.close())
2162 store.close()
2162 store.close()
2163 if ret < 0:
2163 if ret < 0:
2164 raise PatchError(_('patch failed to apply'))
2164 raise PatchError(_('patch failed to apply'))
2165 return ret > 0
2165 return ret > 0
2166
2166
2167 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2167 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2168 eolmode='strict', similarity=0):
2168 eolmode='strict', similarity=0):
2169 """use builtin patch to apply <patchobj> to the working directory.
2169 """use builtin patch to apply <patchobj> to the working directory.
2170 returns whether patch was applied with fuzz factor."""
2170 returns whether patch was applied with fuzz factor."""
2171 backend = workingbackend(ui, repo, similarity)
2171 backend = workingbackend(ui, repo, similarity)
2172 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2172 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2173
2173
2174 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2174 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2175 eolmode='strict'):
2175 eolmode='strict'):
2176 backend = repobackend(ui, repo, ctx, store)
2176 backend = repobackend(ui, repo, ctx, store)
2177 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2177 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2178
2178
2179 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2179 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2180 similarity=0):
2180 similarity=0):
2181 """Apply <patchname> to the working directory.
2181 """Apply <patchname> to the working directory.
2182
2182
2183 'eolmode' specifies how end of lines should be handled. It can be:
2183 'eolmode' specifies how end of lines should be handled. It can be:
2184 - 'strict': inputs are read in binary mode, EOLs are preserved
2184 - 'strict': inputs are read in binary mode, EOLs are preserved
2185 - 'crlf': EOLs are ignored when patching and reset to CRLF
2185 - 'crlf': EOLs are ignored when patching and reset to CRLF
2186 - 'lf': EOLs are ignored when patching and reset to LF
2186 - 'lf': EOLs are ignored when patching and reset to LF
2187 - None: get it from user settings, default to 'strict'
2187 - None: get it from user settings, default to 'strict'
2188 'eolmode' is ignored when using an external patcher program.
2188 'eolmode' is ignored when using an external patcher program.
2189
2189
2190 Returns whether patch was applied with fuzz factor.
2190 Returns whether patch was applied with fuzz factor.
2191 """
2191 """
2192 patcher = ui.config('ui', 'patch')
2192 patcher = ui.config('ui', 'patch')
2193 if files is None:
2193 if files is None:
2194 files = set()
2194 files = set()
2195 if patcher:
2195 if patcher:
2196 return _externalpatch(ui, repo, patcher, patchname, strip,
2196 return _externalpatch(ui, repo, patcher, patchname, strip,
2197 files, similarity)
2197 files, similarity)
2198 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2198 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2199 similarity)
2199 similarity)
2200
2200
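# Usage sketch (file name illustrative): apply a patch to the working
# directory, normalizing line endings to LF:
#
#   files = set()
#   fuzz = patch(ui, repo, 'fix-typo.patch', strip=1, files=files,
#                eolmode='lf')
#   # 'files' now contains every path the patch touched; 'fuzz' is True
#   # when some hunk only applied with a fuzz factor.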
2201 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2201 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2202 backend = fsbackend(ui, repo.root)
2202 backend = fsbackend(ui, repo.root)
2203 prefix = _canonprefix(repo, prefix)
2203 prefix = _canonprefix(repo, prefix)
2204 with open(patchpath, 'rb') as fp:
2204 with open(patchpath, 'rb') as fp:
2205 changed = set()
2205 changed = set()
2206 for state, values in iterhunks(fp):
2206 for state, values in iterhunks(fp):
2207 if state == 'file':
2207 if state == 'file':
2208 afile, bfile, first_hunk, gp = values
2208 afile, bfile, first_hunk, gp = values
2209 if gp:
2209 if gp:
2210 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2210 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2211 if gp.oldpath:
2211 if gp.oldpath:
2212 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2212 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2213 prefix)[1]
2213 prefix)[1]
2214 else:
2214 else:
2215 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2215 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2216 prefix)
2216 prefix)
2217 changed.add(gp.path)
2217 changed.add(gp.path)
2218 if gp.op == 'RENAME':
2218 if gp.op == 'RENAME':
2219 changed.add(gp.oldpath)
2219 changed.add(gp.oldpath)
2220 elif state not in ('hunk', 'git'):
2220 elif state not in ('hunk', 'git'):
2221 raise error.Abort(_('unsupported parser state: %s') % state)
2221 raise error.Abort(_('unsupported parser state: %s') % state)
2222 return changed
2222 return changed
2223
2223
2224 class GitDiffRequired(Exception):
2224 class GitDiffRequired(Exception):
2225 pass
2225 pass
2226
2226
2227 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2227 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2228 '''return diffopts with all features supported and parsed'''
2228 '''return diffopts with all features supported and parsed'''
2229 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2229 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2230 git=True, whitespace=True, formatchanging=True)
2230 git=True, whitespace=True, formatchanging=True)
2231
2231
2232 diffopts = diffallopts
2232 diffopts = diffallopts
2233
2233
2234 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2234 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2235 whitespace=False, formatchanging=False):
2235 whitespace=False, formatchanging=False):
2236 '''return diffopts with only opted-in features parsed
2236 '''return diffopts with only opted-in features parsed
2237
2237
2238 Features:
2238 Features:
2239 - git: git-style diffs
2239 - git: git-style diffs
2240 - whitespace: whitespace options like ignoreblanklines and ignorews
2240 - whitespace: whitespace options like ignoreblanklines and ignorews
2241 - formatchanging: options that will likely break or cause correctness issues
2241 - formatchanging: options that will likely break or cause correctness issues
2242 with most diff parsers
2242 with most diff parsers
2243 '''
2243 '''
2244 def get(key, name=None, getter=ui.configbool, forceplain=None):
2244 def get(key, name=None, getter=ui.configbool, forceplain=None):
2245 if opts:
2245 if opts:
2246 v = opts.get(key)
2246 v = opts.get(key)
2247 # diffopts flags are either None-default (which is passed
2247 # diffopts flags are either None-default (which is passed
2248 # through unchanged, so we can identify unset values), or
2248 # through unchanged, so we can identify unset values), or
2249 # some other falsey default (eg --unified, which defaults
2249 # some other falsey default (eg --unified, which defaults
2250 # to an empty string). We only want to override the config
2250 # to an empty string). We only want to override the config
2251 # entries from hgrc with command line values if they
2251 # entries from hgrc with command line values if they
2252 # appear to have been set, which is any truthy value,
2252 # appear to have been set, which is any truthy value,
2253 # True, or False.
2253 # True, or False.
2254 if v or isinstance(v, bool):
2254 if v or isinstance(v, bool):
2255 return v
2255 return v
2256 if forceplain is not None and ui.plain():
2256 if forceplain is not None and ui.plain():
2257 return forceplain
2257 return forceplain
2258 return getter(section, name or key, untrusted=untrusted)
2258 return getter(section, name or key, untrusted=untrusted)
2259
2259
2260 # core options, expected to be understood by every diff parser
2260 # core options, expected to be understood by every diff parser
2261 buildopts = {
2261 buildopts = {
2262 'nodates': get('nodates'),
2262 'nodates': get('nodates'),
2263 'showfunc': get('show_function', 'showfunc'),
2263 'showfunc': get('show_function', 'showfunc'),
2264 'context': get('unified', getter=ui.config),
2264 'context': get('unified', getter=ui.config),
2265 }
2265 }
2266 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
2266 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
2267 buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
2267 buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
2268
2268
2269 if git:
2269 if git:
2270 buildopts['git'] = get('git')
2270 buildopts['git'] = get('git')
2271
2271
2272 # since this is in the experimental section, we need to call
2272 # since this is in the experimental section, we need to call
2273 # ui.configbool directly
2273 # ui.configbool directly
2274 buildopts['showsimilarity'] = ui.configbool('experimental',
2274 buildopts['showsimilarity'] = ui.configbool('experimental',
2275 'extendedheader.similarity')
2275 'extendedheader.similarity')
2276
2276
2277 # need to inspect the ui object instead of using get() since we want to
2277 # need to inspect the ui object instead of using get() since we want to
2278 # test for an int
2278 # test for an int
2279 hconf = ui.config('experimental', 'extendedheader.index')
2279 hconf = ui.config('experimental', 'extendedheader.index')
2280 if hconf is not None:
2280 if hconf is not None:
2281 hlen = None
2281 hlen = None
2282 try:
2282 try:
2283 # the hash config could be an integer (for length of hash) or a
2283 # the hash config could be an integer (for length of hash) or a
2284 # word (e.g. short, full, none)
2284 # word (e.g. short, full, none)
2285 hlen = int(hconf)
2285 hlen = int(hconf)
2286 if hlen < 0 or hlen > 40:
2286 if hlen < 0 or hlen > 40:
2287 msg = _("invalid length for extendedheader.index: '%d'\n")
2287 msg = _("invalid length for extendedheader.index: '%d'\n")
2288 ui.warn(msg % hlen)
2288 ui.warn(msg % hlen)
2289 except ValueError:
2289 except ValueError:
2290 # default value
2290 # default value
2291 if hconf == 'short' or hconf == '':
2291 if hconf == 'short' or hconf == '':
2292 hlen = 12
2292 hlen = 12
2293 elif hconf == 'full':
2293 elif hconf == 'full':
2294 hlen = 40
2294 hlen = 40
2295 elif hconf != 'none':
2295 elif hconf != 'none':
2296 msg = _("invalid value for extendedheader.index: '%s'\n")
2296 msg = _("invalid value for extendedheader.index: '%s'\n")
2297 ui.warn(msg % hconf)
2297 ui.warn(msg % hconf)
2298 finally:
2298 finally:
2299 buildopts['index'] = hlen
2299 buildopts['index'] = hlen
2300
2300
2301 if whitespace:
2301 if whitespace:
2302 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2302 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2303 buildopts['ignorewsamount'] = get('ignore_space_change',
2303 buildopts['ignorewsamount'] = get('ignore_space_change',
2304 'ignorewsamount')
2304 'ignorewsamount')
2305 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2305 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2306 'ignoreblanklines')
2306 'ignoreblanklines')
2307 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2307 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2308 if formatchanging:
2308 if formatchanging:
2309 buildopts['text'] = opts and opts.get('text')
2309 buildopts['text'] = opts and opts.get('text')
2310 binary = None if opts is None else opts.get('binary')
2310 binary = None if opts is None else opts.get('binary')
2311 buildopts['nobinary'] = (not binary if binary is not None
2311 buildopts['nobinary'] = (not binary if binary is not None
2312 else get('nobinary', forceplain=False))
2312 else get('nobinary', forceplain=False))
2313 buildopts['noprefix'] = get('noprefix', forceplain=False)
2313 buildopts['noprefix'] = get('noprefix', forceplain=False)
2314
2314
2315 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2315 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2316
2316
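# Usage sketch (assumed values): build diffopts that honour the [diff]
# config section plus git-style headers, while leaving whitespace and
# format-changing options at their defaults:
#
#   dopts = difffeatureopts(ui, opts={'git': True}, git=True)
#   # dopts.git is True; dopts.showfunc, dopts.context etc. come from the
#   # command line value when set, otherwise from the configuration.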
2317 def diff(repo, node1=None, node2=None, match=None, changes=None,
2317 def diff(repo, node1=None, node2=None, match=None, changes=None,
2318 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2318 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2319 hunksfilterfn=None):
2319 hunksfilterfn=None):
2320 '''yields diff of changes to files between two nodes, or node and
2320 '''yields diff of changes to files between two nodes, or node and
2321 working directory.
2321 working directory.
2322
2322
2323 if node1 is None, use first dirstate parent instead.
2323 if node1 is None, use first dirstate parent instead.
2324 if node2 is None, compare node1 with working directory.
2324 if node2 is None, compare node1 with working directory.
2325
2325
2326 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2326 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2327 every time some change cannot be represented with the current
2327 every time some change cannot be represented with the current
2328 patch format. Return False to upgrade to git patch format, True to
2328 patch format. Return False to upgrade to git patch format, True to
2329 accept the loss or raise an exception to abort the diff. It is
2329 accept the loss or raise an exception to abort the diff. It is
2330 called with the name of current file being diffed as 'fn'. If set
2330 called with the name of current file being diffed as 'fn'. If set
2331 to None, patches will always be upgraded to git format when
2331 to None, patches will always be upgraded to git format when
2332 necessary.
2332 necessary.
2333
2333
2334 prefix is a filename prefix that is prepended to all filenames on
2334 prefix is a filename prefix that is prepended to all filenames on
2335 display (used for subrepos).
2335 display (used for subrepos).
2336
2336
2337 relroot, if not empty, must be normalized with a trailing /. Any match
2337 relroot, if not empty, must be normalized with a trailing /. Any match
2338 patterns that fall outside it will be ignored.
2338 patterns that fall outside it will be ignored.
2339
2339
2340 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2340 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2341 information.
2341 information.
2342
2342
2343 hunksfilterfn, if not None, should be a function taking a filectx and
2343 hunksfilterfn, if not None, should be a function taking a filectx and
2344 hunks generator that may yield filtered hunks.
2344 hunks generator that may yield filtered hunks.
2345 '''
2345 '''
2346 for fctx1, fctx2, hdr, hunks in diffhunks(
2346 for fctx1, fctx2, hdr, hunks in diffhunks(
2347 repo, node1=node1, node2=node2,
2347 repo, node1=node1, node2=node2,
2348 match=match, changes=changes, opts=opts,
2348 match=match, changes=changes, opts=opts,
2349 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2349 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2350 ):
2350 ):
2351 if hunksfilterfn is not None:
2351 if hunksfilterfn is not None:
2352 # If the file has been removed, fctx2 is None; but this should
2352 # If the file has been removed, fctx2 is None; but this should
2353 # not occur here since we catch removed files early in
2353 # not occur here since we catch removed files early in
2354 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2354 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2355 assert fctx2 is not None, \
2355 assert fctx2 is not None, \
2356 'fctx2 unexpectedly None in diff hunks filtering'
2356 'fctx2 unexpectedly None in diff hunks filtering'
2357 hunks = hunksfilterfn(fctx2, hunks)
2357 hunks = hunksfilterfn(fctx2, hunks)
2358 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2358 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2359 if hdr and (text or len(hdr) > 1):
2359 if hdr and (text or len(hdr) > 1):
2360 yield '\n'.join(hdr) + '\n'
2360 yield '\n'.join(hdr) + '\n'
2361 if text:
2361 if text:
2362 yield text
2362 yield text
2363
2363
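# Usage sketch (paths illustrative): write a git-style diff of the working
# directory against its first parent for one file:
#
#   m = scmutil.match(repo[None], ['path:mercurial/patch.py'])
#   opts = diffallopts(repo.ui, {'git': True})
#   for chunk in diff(repo, match=m, opts=opts):
#       repo.ui.write(chunk)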
2364 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2364 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2365 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2365 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2366 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2366 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2367 where `header` is a list of diff headers and `hunks` is an iterable of
2367 where `header` is a list of diff headers and `hunks` is an iterable of
2368 (`hunkrange`, `hunklines`) tuples.
2368 (`hunkrange`, `hunklines`) tuples.
2369
2369
2370 See diff() for the meaning of parameters.
2370 See diff() for the meaning of parameters.
2371 """
2371 """
2372
2372
2373 if opts is None:
2373 if opts is None:
2374 opts = mdiff.defaultopts
2374 opts = mdiff.defaultopts
2375
2375
2376 if not node1 and not node2:
2376 if not node1 and not node2:
2377 node1 = repo.dirstate.p1()
2377 node1 = repo.dirstate.p1()
2378
2378
2379 def lrugetfilectx():
2379 def lrugetfilectx():
2380 cache = {}
2380 cache = {}
2381 order = collections.deque()
2381 order = collections.deque()
2382 def getfilectx(f, ctx):
2382 def getfilectx(f, ctx):
2383 fctx = ctx.filectx(f, filelog=cache.get(f))
2383 fctx = ctx.filectx(f, filelog=cache.get(f))
2384 if f not in cache:
2384 if f not in cache:
2385 if len(cache) > 20:
2385 if len(cache) > 20:
2386 del cache[order.popleft()]
2386 del cache[order.popleft()]
2387 cache[f] = fctx.filelog()
2387 cache[f] = fctx.filelog()
2388 else:
2388 else:
2389 order.remove(f)
2389 order.remove(f)
2390 order.append(f)
2390 order.append(f)
2391 return fctx
2391 return fctx
2392 return getfilectx
2392 return getfilectx
2393 getfilectx = lrugetfilectx()
2393 getfilectx = lrugetfilectx()
2394
2394
2395 ctx1 = repo[node1]
2395 ctx1 = repo[node1]
2396 ctx2 = repo[node2]
2396 ctx2 = repo[node2]
2397
2397
2398 relfiltered = False
2398 relfiltered = False
2399 if relroot != '' and match.always():
2399 if relroot != '' and match.always():
2400 # as a special case, create a new matcher with just the relroot
2400 # as a special case, create a new matcher with just the relroot
2401 pats = [relroot]
2401 pats = [relroot]
2402 match = scmutil.match(ctx2, pats, default='path')
2402 match = scmutil.match(ctx2, pats, default='path')
2403 relfiltered = True
2403 relfiltered = True
2404
2404
2405 if not changes:
2405 if not changes:
2406 changes = repo.status(ctx1, ctx2, match=match)
2406 changes = repo.status(ctx1, ctx2, match=match)
2407 modified, added, removed = changes[:3]
2407 modified, added, removed = changes[:3]
2408
2408
2409 if not modified and not added and not removed:
2409 if not modified and not added and not removed:
2410 return []
2410 return []
2411
2411
2412 if repo.ui.debugflag:
2412 if repo.ui.debugflag:
2413 hexfunc = hex
2413 hexfunc = hex
2414 else:
2414 else:
2415 hexfunc = short
2415 hexfunc = short
2416 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2416 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2417
2417
2418 if copy is None:
2418 if copy is None:
2419 copy = {}
2419 copy = {}
2420 if opts.git or opts.upgrade:
2420 if opts.git or opts.upgrade:
2421 copy = copies.pathcopies(ctx1, ctx2, match=match)
2421 copy = copies.pathcopies(ctx1, ctx2, match=match)
2422
2422
2423 if relroot is not None:
2423 if relroot is not None:
2424 if not relfiltered:
2424 if not relfiltered:
2425 # XXX this would ideally be done in the matcher, but that is
2425 # XXX this would ideally be done in the matcher, but that is
2426 # generally meant to 'or' patterns, not 'and' them. In this case we
2426 # generally meant to 'or' patterns, not 'and' them. In this case we
2427 # need to 'and' all the patterns from the matcher with relroot.
2427 # need to 'and' all the patterns from the matcher with relroot.
2428 def filterrel(l):
2428 def filterrel(l):
2429 return [f for f in l if f.startswith(relroot)]
2429 return [f for f in l if f.startswith(relroot)]
2430 modified = filterrel(modified)
2430 modified = filterrel(modified)
2431 added = filterrel(added)
2431 added = filterrel(added)
2432 removed = filterrel(removed)
2432 removed = filterrel(removed)
2433 relfiltered = True
2433 relfiltered = True
2434 # filter out copies where either side isn't inside the relative root
2434 # filter out copies where either side isn't inside the relative root
2435 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2435 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2436 if dst.startswith(relroot)
2436 if dst.startswith(relroot)
2437 and src.startswith(relroot)))
2437 and src.startswith(relroot)))
2438
2438
2439 modifiedset = set(modified)
2439 modifiedset = set(modified)
2440 addedset = set(added)
2440 addedset = set(added)
2441 removedset = set(removed)
2441 removedset = set(removed)
2442 for f in modified:
2442 for f in modified:
2443 if f not in ctx1:
2443 if f not in ctx1:
2444 # Fix up added, since merged-in additions appear as
2444 # Fix up added, since merged-in additions appear as
2445 # modifications during merges
2445 # modifications during merges
2446 modifiedset.remove(f)
2446 modifiedset.remove(f)
2447 addedset.add(f)
2447 addedset.add(f)
2448 for f in removed:
2448 for f in removed:
2449 if f not in ctx1:
2449 if f not in ctx1:
2450 # Merged-in additions that are then removed are reported as removed.
2450 # Merged-in additions that are then removed are reported as removed.
2451 # They are not in ctx1, so we don't want to show them in the diff.
2451 # They are not in ctx1, so we don't want to show them in the diff.
2452 removedset.remove(f)
2452 removedset.remove(f)
2453 modified = sorted(modifiedset)
2453 modified = sorted(modifiedset)
2454 added = sorted(addedset)
2454 added = sorted(addedset)
2455 removed = sorted(removedset)
2455 removed = sorted(removedset)
2456 for dst, src in list(copy.items()):
2456 for dst, src in list(copy.items()):
2457 if src not in ctx1:
2457 if src not in ctx1:
2458 # Files merged in during a merge and then copied/renamed are
2458 # Files merged in during a merge and then copied/renamed are
2459 # reported as copies. We want to show them in the diff as additions.
2459 # reported as copies. We want to show them in the diff as additions.
2460 del copy[dst]
2460 del copy[dst]
2461
2461
2462 def difffn(opts, losedata):
2462 def difffn(opts, losedata):
2463 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2463 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2464 copy, getfilectx, opts, losedata, prefix, relroot)
2464 copy, getfilectx, opts, losedata, prefix, relroot)
2465 if opts.upgrade and not opts.git:
2465 if opts.upgrade and not opts.git:
2466 try:
2466 try:
2467 def losedata(fn):
2467 def losedata(fn):
2468 if not losedatafn or not losedatafn(fn=fn):
2468 if not losedatafn or not losedatafn(fn=fn):
2469 raise GitDiffRequired
2469 raise GitDiffRequired
2470 # Buffer the whole output until we are sure it can be generated
2470 # Buffer the whole output until we are sure it can be generated
2471 return list(difffn(opts.copy(git=False), losedata))
2471 return list(difffn(opts.copy(git=False), losedata))
2472 except GitDiffRequired:
2472 except GitDiffRequired:
2473 return difffn(opts.copy(git=True), None)
2473 return difffn(opts.copy(git=True), None)
2474 else:
2474 else:
2475 return difffn(opts, None)
2475 return difffn(opts, None)
2476
2476
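# Sketch of consuming diffhunks() directly (illustrative; 'process' is a
# placeholder): each item pairs the two file contexts with a header list
# and a hunks iterable of (hunkrange, hunklines) built by mdiff.unidiff():
#
#   for fctx1, fctx2, header, hunks in diffhunks(repo, opts=opts):
#       for hunkrange, hunklines in hunks:
#           process(header, hunkrange, hunklines)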
2477 def difflabel(func, *args, **kw):
2477 def difflabel(func, *args, **kw):
2478 '''yields 2-tuples of (output, label) based on the output of func()'''
2478 '''yields 2-tuples of (output, label) based on the output of func()'''
2479 inlinecolor = False
2479 inlinecolor = False
2480 if kw.get(r'opts'):
2480 if kw.get(r'opts'):
2481 inlinecolor = kw[r'opts'].worddiff
2481 inlinecolor = kw[r'opts'].worddiff
2482 headprefixes = [('diff', 'diff.diffline'),
2482 headprefixes = [('diff', 'diff.diffline'),
2483 ('copy', 'diff.extended'),
2483 ('copy', 'diff.extended'),
2484 ('rename', 'diff.extended'),
2484 ('rename', 'diff.extended'),
2485 ('old', 'diff.extended'),
2485 ('old', 'diff.extended'),
2486 ('new', 'diff.extended'),
2486 ('new', 'diff.extended'),
2487 ('deleted', 'diff.extended'),
2487 ('deleted', 'diff.extended'),
2488 ('index', 'diff.extended'),
2488 ('index', 'diff.extended'),
2489 ('similarity', 'diff.extended'),
2489 ('similarity', 'diff.extended'),
2490 ('---', 'diff.file_a'),
2490 ('---', 'diff.file_a'),
2491 ('+++', 'diff.file_b')]
2491 ('+++', 'diff.file_b')]
2492 textprefixes = [('@', 'diff.hunk'),
2492 textprefixes = [('@', 'diff.hunk'),
2493 ('-', 'diff.deleted'),
2493 ('-', 'diff.deleted'),
2494 ('+', 'diff.inserted')]
2494 ('+', 'diff.inserted')]
2495 head = False
2495 head = False
2496 for chunk in func(*args, **kw):
2496 for chunk in func(*args, **kw):
2497 lines = chunk.split('\n')
2497 lines = chunk.split('\n')
2498 matches = {}
2498 matches = {}
2499 if inlinecolor:
2499 if inlinecolor:
2500 matches = _findmatches(lines)
2500 matches = _findmatches(lines)
2501 for i, line in enumerate(lines):
2501 for i, line in enumerate(lines):
2502 if i != 0:
2502 if i != 0:
2503 yield ('\n', '')
2503 yield ('\n', '')
2504 if head:
2504 if head:
2505 if line.startswith('@'):
2505 if line.startswith('@'):
2506 head = False
2506 head = False
2507 else:
2507 else:
2508 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2508 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2509 head = True
2509 head = True
2510 stripline = line
2510 stripline = line
2511 diffline = False
2511 diffline = False
2512 if not head and line and line.startswith(('+', '-')):
2512 if not head and line and line.startswith(('+', '-')):
2513 # highlight tabs and trailing whitespace, but only in
2513 # highlight tabs and trailing whitespace, but only in
2514 # changed lines
2514 # changed lines
2515 stripline = line.rstrip()
2515 stripline = line.rstrip()
2516 diffline = True
2516 diffline = True
2517
2517
2518 prefixes = textprefixes
2518 prefixes = textprefixes
2519 if head:
2519 if head:
2520 prefixes = headprefixes
2520 prefixes = headprefixes
2521 for prefix, label in prefixes:
2521 for prefix, label in prefixes:
2522 if stripline.startswith(prefix):
2522 if stripline.startswith(prefix):
2523 if diffline:
2523 if diffline:
2524 if i in matches:
2524 if i in matches:
2525 for t, l in _inlinediff(lines[i].rstrip(),
2525 for t, l in _inlinediff(lines[i].rstrip(),
2526 lines[matches[i]].rstrip(),
2526 lines[matches[i]].rstrip(),
2527 label):
2527 label):
2528 yield (t, l)
2528 yield (t, l)
2529 else:
2529 else:
2530 for token in tabsplitter.findall(stripline):
2530 for token in tabsplitter.findall(stripline):
2531 if token.startswith('\t'):
2531 if token.startswith('\t'):
2532 yield (token, 'diff.tab')
2532 yield (token, 'diff.tab')
2533 else:
2533 else:
2534 yield (token, label)
2534 yield (token, label)
2535 else:
2535 else:
2536 yield (stripline, label)
2536 yield (stripline, label)
2537 break
2537 break
2538 else:
2538 else:
2539 yield (line, '')
2539 yield (line, '')
2540 if line != stripline:
2540 if line != stripline:
2541 yield (line[len(stripline):], 'diff.trailingwhitespace')
2541 yield (line[len(stripline):], 'diff.trailingwhitespace')
2542
2542
2543 def _findmatches(slist):
2543 def _findmatches(slist):
2544 '''Look for insertion matches to deletions and return a dict of
2544 '''Look for insertion matches to deletions and return a dict of
2545 correspondences.
2545 correspondences.
2546 '''
2546 '''
2547 lastmatch = 0
2547 lastmatch = 0
2548 matches = {}
2548 matches = {}
2549 for i, line in enumerate(slist):
2549 for i, line in enumerate(slist):
2550 if line == '':
2550 if line == '':
2551 continue
2551 continue
2552 if line.startswith('-'):
2552 if line.startswith('-'):
2553 lastmatch = max(lastmatch, i)
2553 lastmatch = max(lastmatch, i)
2554 newgroup = False
2554 newgroup = False
2555 for j, newline in enumerate(slist[lastmatch + 1:]):
2555 for j, newline in enumerate(slist[lastmatch + 1:]):
2556 if newline == '':
2556 if newline == '':
2557 continue
2557 continue
2558 if newline.startswith('-') and newgroup: # too far, no match
2558 if newline.startswith('-') and newgroup: # too far, no match
2559 break
2559 break
2560 if newline.startswith('+'): # potential match
2560 if newline.startswith('+'): # potential match
2561 newgroup = True
2561 newgroup = True
2562 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2562 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2563 if sim > 0.7:
2563 if sim > 0.7:
2564 lastmatch = lastmatch + 1 + j
2564 lastmatch = lastmatch + 1 + j
2565 matches[i] = lastmatch
2565 matches[i] = lastmatch
2566 matches[lastmatch] = i
2566 matches[lastmatch] = i
2567 break
2567 break
2568 return matches
2568 return matches
2569
2569
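# Illustrative example (input assumed): for the hunk lines
#   ['-def f(x):', '+def f(x, y):']
# the deletion at index 0 and the insertion at index 1 are similar enough
# (SequenceMatcher ratio above 0.7), so _findmatches() returns {0: 1, 1: 0}
# and _inlinediff() below can highlight just the changed tokens.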
2570 def _inlinediff(s1, s2, operation):
2570 def _inlinediff(s1, s2, operation):
2571 '''Perform string diff to highlight specific changes.'''
2571 '''Perform string diff to highlight specific changes.'''
2572 operation_skip = ('+', '?') if operation == 'diff.deleted' else ('-', '?')
2572 operation_skip = ('+', '?') if operation == 'diff.deleted' else ('-', '?')
2573 if operation == 'diff.deleted':
2573 if operation == 'diff.deleted':
2574 s2, s1 = s1, s2
2574 s2, s1 = s1, s2
2575
2575
2576 buff = []
2576 buff = []
2577 # we never want to highlight the leading +-
2577 # we never want to highlight the leading +-
2578 if operation == 'diff.deleted' and s2.startswith('-'):
2578 if operation == 'diff.deleted' and s2.startswith('-'):
2579 label = operation
2579 label = operation
2580 token = '-'
2580 token = '-'
2581 s2 = s2[1:]
2581 s2 = s2[1:]
2582 s1 = s1[1:]
2582 s1 = s1[1:]
2583 elif operation == 'diff.inserted' and s1.startswith('+'):
2583 elif operation == 'diff.inserted' and s1.startswith('+'):
2584 label = operation
2584 label = operation
2585 token = '+'
2585 token = '+'
2586 s2 = s2[1:]
2586 s2 = s2[1:]
2587 s1 = s1[1:]
2587 s1 = s1[1:]
2588 else:
2588 else:
2589 raise error.ProgrammingError("Case not expected, operation = %s" %
2589 raise error.ProgrammingError("Case not expected, operation = %s" %
2590 operation)
2590 operation)
2591
2591
2592 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2592 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2593 for part in s:
2593 for part in s:
2594 if part.startswith(operation_skip) or len(part) == 2:
2594 if part.startswith(operation_skip) or len(part) == 2:
2595 continue
2595 continue
2596 l = operation + '.highlight'
2596 l = operation + '.highlight'
2597 if part.startswith(' '):
2597 if part.startswith(' '):
2598 l = operation
2598 l = operation
2599 if part[2:] == '\t':
2599 if part[2:] == '\t':
2600 l = 'diff.tab'
2600 l = 'diff.tab'
2601 if l == label: # contiguous token with same label
2601 if l == label: # contiguous token with same label
2602 token += part[2:]
2602 token += part[2:]
2603 continue
2603 continue
2604 else:
2604 else:
2605 buff.append((token, label))
2605 buff.append((token, label))
2606 label = l
2606 label = l
2607 token = part[2:]
2607 token = part[2:]
2608 buff.append((token, label))
2608 buff.append((token, label))
2609
2609
2610 return buff
2610 return buff
2611
2611
2612 def diffui(*args, **kw):
2612 def diffui(*args, **kw):
2613 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2613 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2614 return difflabel(diff, *args, **kw)
2614 return difflabel(diff, *args, **kw)
2615
2615
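# Sketch (match and opts built as in the diff() example above): feed the
# labelled chunks straight to ui.write so the configured colors apply:
#
#   for output, label in diffui(repo, match=m, opts=opts):
#       repo.ui.write(output, label=label)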
2616 def _filepairs(modified, added, removed, copy, opts):
2616 def _filepairs(modified, added, removed, copy, opts):
2617 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2617 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2618 before and f2 is the name after. For added files, f1 will be None,
2618 before and f2 is the name after. For added files, f1 will be None,
2619 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2619 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2620 or 'rename' (the latter two only if opts.git is set).'''
2620 or 'rename' (the latter two only if opts.git is set).'''
2621 gone = set()
2621 gone = set()
2622
2622
2623 copyto = dict([(v, k) for k, v in copy.items()])
2623 copyto = dict([(v, k) for k, v in copy.items()])
2624
2624
2625 addedset, removedset = set(added), set(removed)
2625 addedset, removedset = set(added), set(removed)
2626
2626
2627 for f in sorted(modified + added + removed):
2627 for f in sorted(modified + added + removed):
2628 copyop = None
2628 copyop = None
2629 f1, f2 = f, f
2629 f1, f2 = f, f
2630 if f in addedset:
2630 if f in addedset:
2631 f1 = None
2631 f1 = None
2632 if f in copy:
2632 if f in copy:
2633 if opts.git:
2633 if opts.git:
2634 f1 = copy[f]
2634 f1 = copy[f]
2635 if f1 in removedset and f1 not in gone:
2635 if f1 in removedset and f1 not in gone:
2636 copyop = 'rename'
2636 copyop = 'rename'
2637 gone.add(f1)
2637 gone.add(f1)
2638 else:
2638 else:
2639 copyop = 'copy'
2639 copyop = 'copy'
2640 elif f in removedset:
2640 elif f in removedset:
2641 f2 = None
2641 f2 = None
2642 if opts.git:
2642 if opts.git:
2643 # have we already reported a copy above?
2643 # have we already reported a copy above?
2644 if (f in copyto and copyto[f] in addedset
2644 if (f in copyto and copyto[f] in addedset
2645 and copy[copyto[f]] == f):
2645 and copy[copyto[f]] == f):
2646 continue
2646 continue
2647 yield f1, f2, copyop
2647 yield f1, f2, copyop
2648
2648
2649 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2649 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2650 copy, getfilectx, opts, losedatafn, prefix, relroot):
2650 copy, getfilectx, opts, losedatafn, prefix, relroot):
2651 '''given input data, generate a diff and yield it in blocks
2651 '''given input data, generate a diff and yield it in blocks
2652
2652
2653 If generating a diff would lose data like flags or binary data and
2653 If generating a diff would lose data like flags or binary data and
2654 losedatafn is not None, it will be called.
2654 losedatafn is not None, it will be called.
2655
2655
2656 relroot is removed and prefix is added to every path in the diff output.
2656 relroot is removed and prefix is added to every path in the diff output.
2657
2657
2658 If relroot is not empty, this function expects every path in modified,
2658 If relroot is not empty, this function expects every path in modified,
2659 added, removed and copy to start with it.'''
2659 added, removed and copy to start with it.'''
2660
2660
2661 def gitindex(text):
2661 def gitindex(text):
2662 if not text:
2662 if not text:
2663 text = ""
2663 text = ""
2664 l = len(text)
2664 l = len(text)
2665 s = hashlib.sha1('blob %d\0' % l)
2665 s = hashlib.sha1('blob %d\0' % l)
2666 s.update(text)
2666 s.update(text)
2667 return hex(s.digest())
2667 return hex(s.digest())
2668
2668
2669 if opts.noprefix:
2669 if opts.noprefix:
2670 aprefix = bprefix = ''
2670 aprefix = bprefix = ''
2671 else:
2671 else:
2672 aprefix = 'a/'
2672 aprefix = 'a/'
2673 bprefix = 'b/'
2673 bprefix = 'b/'
2674
2674
2675 def diffline(f, revs):
2675 def diffline(f, revs):
2676 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2676 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2677 return 'diff %s %s' % (revinfo, f)
2677 return 'diff %s %s' % (revinfo, f)
2678
2678
2679 def isempty(fctx):
2679 def isempty(fctx):
2680 return fctx is None or fctx.size() == 0
2680 return fctx is None or fctx.size() == 0
2681
2681
2682 date1 = dateutil.datestr(ctx1.date())
2682 date1 = dateutil.datestr(ctx1.date())
2683 date2 = dateutil.datestr(ctx2.date())
2683 date2 = dateutil.datestr(ctx2.date())
2684
2684
2685 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2685 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2686
2686
2687 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2687 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2688 or repo.ui.configbool('devel', 'check-relroot')):
2688 or repo.ui.configbool('devel', 'check-relroot')):
2689 for f in modified + added + removed + list(copy) + list(copy.values()):
2689 for f in modified + added + removed + list(copy) + list(copy.values()):
2690 if f is not None and not f.startswith(relroot):
2690 if f is not None and not f.startswith(relroot):
2691 raise AssertionError(
2691 raise AssertionError(
2692 "file %s doesn't start with relroot %s" % (f, relroot))
2692 "file %s doesn't start with relroot %s" % (f, relroot))
2693
2693
2694 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2694 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2695 content1 = None
2695 content1 = None
2696 content2 = None
2696 content2 = None
2697 fctx1 = None
2697 fctx1 = None
2698 fctx2 = None
2698 fctx2 = None
2699 flag1 = None
2699 flag1 = None
2700 flag2 = None
2700 flag2 = None
2701 if f1:
2701 if f1:
2702 fctx1 = getfilectx(f1, ctx1)
2702 fctx1 = getfilectx(f1, ctx1)
2703 if opts.git or losedatafn:
2703 if opts.git or losedatafn:
2704 flag1 = ctx1.flags(f1)
2704 flag1 = ctx1.flags(f1)
2705 if f2:
2705 if f2:
2706 fctx2 = getfilectx(f2, ctx2)
2706 fctx2 = getfilectx(f2, ctx2)
2707 if opts.git or losedatafn:
2707 if opts.git or losedatafn:
2708 flag2 = ctx2.flags(f2)
2708 flag2 = ctx2.flags(f2)
2709 # if binary is True, output "summary" or "base85", but not "text diff"
2709 # if binary is True, output "summary" or "base85", but not "text diff"
2710 if opts.text:
2710 if opts.text:
2711 binary = False
2711 binary = False
2712 else:
2712 else:
2713 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2713 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2714
2714
2715 if losedatafn and not opts.git:
2715 if losedatafn and not opts.git:
2716 if (binary or
2716 if (binary or
2717 # copy/rename
2717 # copy/rename
2718 f2 in copy or
2718 f2 in copy or
2719 # empty file creation
2719 # empty file creation
2720 (not f1 and isempty(fctx2)) or
2720 (not f1 and isempty(fctx2)) or
2721 # empty file deletion
2721 # empty file deletion
2722 (isempty(fctx1) and not f2) or
2722 (isempty(fctx1) and not f2) or
2723 # create with flags
2723 # create with flags
2724 (not f1 and flag2) or
2724 (not f1 and flag2) or
2725 # change flags
2725 # change flags
2726 (f1 and f2 and flag1 != flag2)):
2726 (f1 and f2 and flag1 != flag2)):
2727 losedatafn(f2 or f1)
2727 losedatafn(f2 or f1)
2728
2728
2729 path1 = f1 or f2
2729 path1 = f1 or f2
2730 path2 = f2 or f1
2730 path2 = f2 or f1
2731 path1 = posixpath.join(prefix, path1[len(relroot):])
2731 path1 = posixpath.join(prefix, path1[len(relroot):])
2732 path2 = posixpath.join(prefix, path2[len(relroot):])
2732 path2 = posixpath.join(prefix, path2[len(relroot):])
2733 header = []
2733 header = []
2734 if opts.git:
2734 if opts.git:
2735 header.append('diff --git %s%s %s%s' %
2735 header.append('diff --git %s%s %s%s' %
2736 (aprefix, path1, bprefix, path2))
2736 (aprefix, path1, bprefix, path2))
2737 if not f1: # added
2737 if not f1: # added
2738 header.append('new file mode %s' % gitmode[flag2])
2738 header.append('new file mode %s' % gitmode[flag2])
2739 elif not f2: # removed
2739 elif not f2: # removed
2740 header.append('deleted file mode %s' % gitmode[flag1])
2740 header.append('deleted file mode %s' % gitmode[flag1])
2741 else: # modified/copied/renamed
2741 else: # modified/copied/renamed
2742 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2742 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2743 if mode1 != mode2:
2743 if mode1 != mode2:
2744 header.append('old mode %s' % mode1)
2744 header.append('old mode %s' % mode1)
2745 header.append('new mode %s' % mode2)
2745 header.append('new mode %s' % mode2)
2746 if copyop is not None:
2746 if copyop is not None:
2747 if opts.showsimilarity:
2747 if opts.showsimilarity:
2748 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2748 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2749 header.append('similarity index %d%%' % sim)
2749 header.append('similarity index %d%%' % sim)
2750 header.append('%s from %s' % (copyop, path1))
2750 header.append('%s from %s' % (copyop, path1))
2751 header.append('%s to %s' % (copyop, path2))
2751 header.append('%s to %s' % (copyop, path2))
2752 elif revs and not repo.ui.quiet:
2752 elif revs and not repo.ui.quiet:
2753 header.append(diffline(path1, revs))
2753 header.append(diffline(path1, revs))
2754
2754
2755 # fctx.is | diffopts | what to | is fctx.data()
2755 # fctx.is | diffopts | what to | is fctx.data()
2756 # binary() | text nobinary git index | output? | outputted?
2756 # binary() | text nobinary git index | output? | outputted?
2757 # ------------------------------------|----------------------------
2757 # ------------------------------------|----------------------------
2758 # yes | no no no * | summary | no
2758 # yes | no no no * | summary | no
2759 # yes | no no yes * | base85 | yes
2759 # yes | no no yes * | base85 | yes
2760 # yes | no yes no * | summary | no
2760 # yes | no yes no * | summary | no
2761 # yes | no yes yes 0 | summary | no
2761 # yes | no yes yes 0 | summary | no
2762 # yes | no yes yes >0 | summary | semi [1]
2762 # yes | no yes yes >0 | summary | semi [1]
2763 # yes | yes * * * | text diff | yes
2763 # yes | yes * * * | text diff | yes
2764 # no | * * * * | text diff | yes
2764 # no | * * * * | text diff | yes
2765 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2765 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2766 if binary and (not opts.git or (opts.git and opts.nobinary and not
2766 if binary and (not opts.git or (opts.git and opts.nobinary and not
2767 opts.index)):
2767 opts.index)):
2768 # fast path: no binary content will be displayed, content1 and
2768 # fast path: no binary content will be displayed, content1 and
2769 # content2 are only used for the equivalence test. cmp() could have a
2769 # content2 are only used for the equivalence test. cmp() could have a
2770 # fast path.
2770 # fast path.
2771 if fctx1 is not None:
2771 if fctx1 is not None:
2772 content1 = b'\0'
2772 content1 = b'\0'
2773 if fctx2 is not None:
2773 if fctx2 is not None:
2774 if fctx1 is not None and not fctx1.cmp(fctx2):
2774 if fctx1 is not None and not fctx1.cmp(fctx2):
2775 content2 = b'\0' # not different
2775 content2 = b'\0' # not different
2776 else:
2776 else:
2777 content2 = b'\0\0'
2777 content2 = b'\0\0'
2778 else:
2778 else:
2779 # normal path: load contents
2779 # normal path: load contents
2780 if fctx1 is not None:
2780 if fctx1 is not None:
2781 content1 = fctx1.data()
2781 content1 = fctx1.data()
2782 if fctx2 is not None:
2782 if fctx2 is not None:
2783 content2 = fctx2.data()
2783 content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks

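# --- Editorial sketch, not part of mercurial/patch.py ----------------------
# The branch above builds git-style 'index <old>..<new> <mode>' headers from
# abbreviated content hashes via gitindex(), which is defined earlier in this
# file (outside the lines shown here). The helper below only mimics that
# header construction with a stand-in hash (SHA-1 over a git-style blob
# preamble); names prefixed with _example_ are hypothetical.
def _example_index_header(content1, content2, indexlen=12, mode='100644'):
    import hashlib

    def fakegitindex(text):
        # git hashes blobs as "blob <size>\0<data>"; mimic that layout here
        return hashlib.sha1(b'blob %d\0%s' % (len(text), text)).hexdigest()

    return 'index %s..%s %s' % (fakegitindex(content1)[0:indexlen],
                                fakegitindex(content2)[0:indexlen],
                                mode)

# _example_index_header(b'old\n', b'new\n') returns a line shaped like
# 'index <12 hex digits>..<12 hex digits> 100644'.
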
def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

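# --- Editorial sketch, not part of mercurial/patch.py ----------------------
# diffstatsum() folds the per-file (filename, adds, removes, isbinary) tuples
# produced by diffstatdata() below into the totals diffstat() needs for its
# layout. The sample tuples here are invented purely for illustration.
def _example_diffstatsum():
    stats = [('README', 2, 1, False),
             ('mercurial/patch.py', 10, 4, False),
             ('icon.png', 0, 0, True)]
    maxfile, maxtotal, addtotal, removetotal, binary = diffstatsum(stats)
    # maxfile == 18 (display width of 'mercurial/patch.py'), maxtotal == 14,
    # addtotal == 12, removetotal == 5, binary is True because of icon.png
    return maxfile, maxtotal, addtotal, removetotal, binary
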
def diffstatdata(lines):
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
        if line.startswith('diff --git a/'):
            # 'gitre' is a module-level pattern defined earlier in this file
            filename = gitre.search(line).group(2)
        elif line.startswith('diff -r'):
            # format: "diff -r ... -r ... filename"
            filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

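# --- Editorial sketch, not part of mercurial/patch.py ----------------------
# Feeding a minimal "diff -r" style patch through diffstatdata() above. The
# sample diff text is invented; real callers pass the lines emitted by the
# diff machinery.
def _example_diffstatdata():
    sample = ['diff -r 000000000000 -r 111111111111 foo.txt\n',
              '--- a/foo.txt\n',
              '+++ b/foo.txt\n',
              '@@ -1,2 +1,2 @@\n',
              ' unchanged line\n',
              '-old line\n',
              '+new line\n']
    # expected result: [('foo.txt', 1, 1, False)] -- one add, one remove, not
    # binary; the '---'/'+++' header lines are skipped thanks to 'inheader'
    return diffstatdata(sample)
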
def diffstat(lines, width=80):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

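# --- Editorial sketch, not part of mercurial/patch.py ----------------------
# The bar scaling performed by scale() inside diffstat() above, restated on
# made-up numbers: with graphwidth=50 and maxtotal=200, 120 added lines give
# a 30-character '+' bar, while a single removed line still gets one '-'
# thanks to the int(bool(i)) floor.
def _example_scale(i, graphwidth=50, maxtotal=200):
    if maxtotal <= graphwidth:
        return i
    return max(i * graphwidth // maxtotal, int(bool(i)))

# _example_scale(120) == 30, _example_scale(1) == 1, _example_scale(0) == 0
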
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(br'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(br'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
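
# --- Editorial sketch, not part of mercurial/patch.py ----------------------
# How a caller might consume the (text, label) pairs yielded by diffstatui()
# above. 'ui' stands for a Mercurial ui object; passing label= to ui.write is
# how labelled (colorized) output is requested, though this exact call site
# is illustrative rather than a real Mercurial code path.
def _example_writediffstat(ui, lines, width=80):
    for text, label in diffstatui(lines, width=width):
        ui.write(text, label=label)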