##// END OF EJS Templates
py3: open patch file in binary mode and convert eol manually...
Yuya Nishihara -
r36856:c268ba15 default
parent child Browse files
Show More
@@ -1,2905 +1,2906 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import difflib
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import tempfile
21 import tempfile
22 import zlib
22 import zlib
23
23
24 from .i18n import _
24 from .i18n import _
25 from .node import (
25 from .node import (
26 hex,
26 hex,
27 short,
27 short,
28 )
28 )
29 from . import (
29 from . import (
30 copies,
30 copies,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 policy,
36 policy,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43 from .utils import dateutil
43 from .utils import dateutil
44
44
# C-accelerated hunk helpers, loaded via the policy layer (may fall back
# to the pure-Python implementation depending on configuration).
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# matches a git-style diff header and captures the a/ and b/ paths
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs (for diff display)
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# matches any single byte that is not a word character (used for word diffs);
# \x80-\xff keeps multi-byte UTF-8 sequences intact
_nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')

# re-exported for convenience; callers catch patch.PatchError
PatchError = error.PatchError

# public functions
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream is sniffed line by line and dispatched to the splitter
    matching the first recognizable marker: hg export banners, mbox
    "From " separators, MIME messages, or plain RFC822-ish headers.
    If nothing is recognized the whole stream is one patch.
    '''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" banners appearing after a blank line
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split on mbox "From " separators, recursing to classify each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email parser walk MIME parts; yield only text-ish payloads
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = pycompat.emailparser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a fresh header block starts after non-header lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object into an iterator of lines
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
184
184
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
191
191
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the diff payload is copied verbatim into a temp file for the applier;
    # opened binary so EOLs survive untouched
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        msg = pycompat.emailparser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip "[PATCH n/m]"-style prefixes from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = pycompat.bytestr(part.get_content_type())
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # text before the diff start is commit message material
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible "# Header value" metadata lines
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # "---" separates the message from a diffstat/patch
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data
312
312
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a st_mode-style octal value into the (islink, isexec) pair
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        """Return an independent copy of this metadata object."""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does 'afile' (the "a/" side of a git diff) refer to this file?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does 'bfile' (the "b/" side of a git diff) refer to this file?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True if the (afile, bfile) diff header pair targets this file."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
358
358
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per "diff --git" section
    found in the line reader 'lr'.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                # default to the destination path; rename/copy lines below
                # may override it
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of the unified diff body ends the metadata section
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # mode is the trailing 6-octal-digit field
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
402
402
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, returned (FIFO) before reading from fp again
        self.buf = []

    def push(self, line):
        """Queue 'line' to be returned by the next readline(); None is a no-op."""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        """Return the next line, consuming pushed-back lines first."""
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
422
422
class abstractbackend(object):
    """Abstract interface for the targets patches are applied to.

    Concrete subclasses decide where file reads/writes actually go
    (filesystem, working copy, in-memory store, ...).
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # intentionally a no-op by default; subclasses may persist rejects

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
456
456
class fsbackend(abstractbackend):
    """Backend applying patches directly to files under 'basedir'."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink content is the link target; mode is (islink, isexec)
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file is simply non-executable; anything else is fatal
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing/deleted file
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # persist rejected hunks next to the target as "<fname>.rej"
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
505
505
class workingbackend(fsbackend):
    """fsbackend variant that also keeps the repo dirstate up to date."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched() in close()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        # list of (copysource, destination) pairs recorded by setfile()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist but are unknown to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush accumulated changes into the dirstate; return changed files."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
549
549
class filestore(object):
    """Store file versions in memory, spilling to a temp dir past 'maxsize'.

    Small files are kept in self.data; once the in-memory budget is
    exhausted, later files are written to disk via a temporary vfs.
    """
    def __init__(self, maxsize=None):
        self.opener = None
        # on-disk entries: fname -> (tempname, mode, copied)
        self.files = {}
        # number of temp files created so far; used to generate names
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            # default in-memory budget: 4 MiB
            self.maxsize = 4*(2**20)
        self.size = 0
        # in-memory entries: fname -> (data, mode, copied)
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        # a negative maxsize means "never spill to disk"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname, or (None, None, None)."""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
586
586
class repobackend(abstractbackend):
    """Patch backend applying changes on top of a changectx.

    Nothing touches the working directory: patched file contents are
    written into *store*, while the names of changed/removed files are
    tracked so close() can report the full set of affected files.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx        # base context the patch is applied against
        self.store = store    # receives patched file data (see setfile)
        self.changed = set()  # files written via setfile()
        self.removed = set()  # files deleted via unlink()
        self.copied = {}      # destination -> copy source

    def _checkknown(self, fname):
        # Refuse to operate on files absent from the base context.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for *fname* in the base context,
        or (None, None) if the file does not exist there."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        """Record patched content for *fname* in the store.

        data=None means "content unchanged": the base context's data is
        stored as-is (mode-only changes, copies).  The file is remembered
        as changed, and copy provenance is tracked when *copysource* is
        given.
        """
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # Deletion is only recorded; the base context itself is immutable.
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched (changed or removed)."""
        return self.changed | self.removed
628
628
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw string literals: the patterns contain \d, \+ and \* which are
# invalid escape sequences in ordinary string literals on Python 3
# (DeprecationWarning, and a SyntaxWarning as of 3.12).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff hunk range markers: "--- 1,5 ----" / "*** 1,5 ****"
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported line-ending normalization modes (see patchfile.eolmode)
eolmodes = ['strict', 'crlf', 'lf', 'auto']
633
633
634 class patchfile(object):
634 class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        """Prepare a single file for patching.

        *gp* is a git-style patch meta object supplying path, mode,
        operation (ADD/COPY/RENAME/DELETE) and copy source.  Existing
        content is fetched from *backend*, or from *store* when the hunk
        is a copy/rename.  *eolmode* selects line-ending handling
        ('strict', 'crlf', 'lf' or 'auto').
        """
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None                 # detected EOL of the original file
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # Copies/renames read the source content from the store.
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            # For copies, existence refers to the *destination* path.
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                # The first line's EOL is taken as representative of the
                # whole file (used by writelines in 'auto' mode).
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Work on LF-normalized lines; original EOLs are
                    # restored by writelines() on output.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
            if self.missing:
                self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
                self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                               "current directory)\n"))

        self.hash = {}          # line content -> line numbers (see findlines)
        self.dirty = 0
        self.offset = 0         # cumulative line-count delta from applied hunks
        self.skew = 0           # displacement found by fuzzy matching
        self.rej = []           # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0
690
690
    def writelines(self, fname, lines, mode):
        """Write *lines* to *fname* via the backend, restoring EOLs.

        The target EOL is chosen from self.eolmode: the file's original
        EOL in 'auto' mode, CRLF in 'crlf' mode, LF otherwise.  Lines were
        LF-normalized at read time (see __init__), so only non-LF targets
        need rewriting here.
        """
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Re-apply the target EOL to every line that ends with LF.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
708
708
709 def printfile(self, warn):
709 def printfile(self, warn):
710 if self.fileprinted:
710 if self.fileprinted:
711 return
711 return
712 if warn or self.ui.verbose:
712 if warn or self.ui.verbose:
713 self.fileprinted = True
713 self.fileprinted = True
714 s = _("patching file %s\n") % self.fname
714 s = _("patching file %s\n") % self.fname
715 if warn:
715 if warn:
716 self.ui.warn(s)
716 self.ui.warn(s)
717 else:
717 else:
718 self.ui.note(s)
718 self.ui.note(s)
719
719
720
720
721 def findlines(self, l, linenum):
721 def findlines(self, l, linenum):
722 # looks through the hash and finds candidate lines. The
722 # looks through the hash and finds candidate lines. The
723 # result is a list of line numbers sorted based on distance
723 # result is a list of line numbers sorted based on distance
724 # from linenum
724 # from linenum
725
725
726 cand = self.hash.get(l, [])
726 cand = self.hash.get(l, [])
727 if len(cand) > 1:
727 if len(cand) > 1:
728 # resort our list of potentials forward then back.
728 # resort our list of potentials forward then back.
729 cand.sort(key=lambda x: abs(x - linenum))
729 cand.sort(key=lambda x: abs(x - linenum))
730 return cand
730 return cand
731
731
    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        # Synthetic ---/+++ header so the .rej file is itself a valid patch.
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    # Preserve "no trailing newline" markers in the reject.
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
747
747
    def apply(self, h):
        """Apply hunk *h* to the in-memory file content.

        Returns 0 on a clean application, the fuzz level (> 0) when the
        hunk applied with fuzz, and -1 when it was rejected (the hunk is
        then appended to self.rej).  Raises PatchError for a malformed
        hunk.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            # Target file could not be found: reject everything.
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            # Patch wants to create a file that is already there.
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace (or remove) the whole content.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        # Index every line so findlines() can propose candidate positions.
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz levels; at each level first drop context at
        # the top only, then at both ends.
        # NOTE(review): xrange is py2 — presumably rewritten by Mercurial's
        # source transformer / pycompat on py3.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    # Candidates are positions of the hunk's first old line.
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        # Remember displacement for subsequent hunks.
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        # All fuzz levels exhausted: reject the original (unnormalized) hunk.
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1
844
844
    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
850
850
class header(object):
    """patch header

    Wraps the header lines of one file's diff (everything before the
    hunks) and answers questions about it: which files it concerns,
    whether it is binary, whether it must be taken all-or-nothing, etc.
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header    # list of raw header lines
        self.hunks = []         # recordhunk objects, appended by the parser

    def binary(self):
        # An "index " line marks a git binary diff.
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to *fp*."""
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        """Write the raw header lines to *fp*."""
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the file must be taken as a whole (binary or deleted).
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file name(s) this header concerns.

        For a git-style header: [path] when unchanged, else
        [frompath, topath].  Otherwise the single path from the
        "diff -r" line.
        """
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        # Destination path (last entry of files()).
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
               any(self.special_re.match(h) for h in self.header)
922
922
class recordhunk(object):
    """patch hunk

    One hunk of a file diff: leading context (*before*), the +/- body
    (*hunk*), trailing context (*after*), and the @@ range information.

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Limit context to maxcontext lines; return (number trimmed,
            # remaining lines).  reverse=True keeps the *last* lines
            # (used for leading context).
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # Trimming leading context shifts both range starts forward.
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc        # text after the second @@ (function hint)
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        # Consistent with __eq__: body, files, start line and proc hint.
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith('+')])
        rem = len([h for h in hunk if h.startswith('-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        m = {'+': '-', '-': '+', '\\': '\\'}
        hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        """Write this hunk in unified diff format to *fp*."""
        delta = len(self.before) + len(self.after)
        # A trailing "\ No newline" marker is not a real context line.
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1001
1001
def getmessages():
    """Return the interactive-filtering prompt strings.

    The mapping is keyed first by context ('multiple' when several
    changes remain in the file, 'single' otherwise, 'help' for the
    choice list) and then by operation ('apply', 'discard', 'record').
    Built lazily in a function so _() runs after i18n setup.
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1047
1047
1048 def filterpatch(ui, headers, operation=None):
1048 def filterpatch(ui, headers, operation=None):
1049 """Interactively filter patch chunks into applied-only chunks"""
1049 """Interactively filter patch chunks into applied-only chunks"""
1050 messages = getmessages()
1050 messages = getmessages()
1051
1051
1052 if operation is None:
1052 if operation is None:
1053 operation = 'record'
1053 operation = 'record'
1054
1054
1055 def prompt(skipfile, skipall, query, chunk):
1055 def prompt(skipfile, skipall, query, chunk):
1056 """prompt query, and process base inputs
1056 """prompt query, and process base inputs
1057
1057
1058 - y/n for the rest of file
1058 - y/n for the rest of file
1059 - y/n for the rest
1059 - y/n for the rest
1060 - ? (help)
1060 - ? (help)
1061 - q (quit)
1061 - q (quit)
1062
1062
1063 Return True/False and possibly updated skipfile and skipall.
1063 Return True/False and possibly updated skipfile and skipall.
1064 """
1064 """
1065 newpatches = None
1065 newpatches = None
1066 if skipall is not None:
1066 if skipall is not None:
1067 return skipall, skipfile, skipall, newpatches
1067 return skipall, skipfile, skipall, newpatches
1068 if skipfile is not None:
1068 if skipfile is not None:
1069 return skipfile, skipfile, skipall, newpatches
1069 return skipfile, skipfile, skipall, newpatches
1070 while True:
1070 while True:
1071 resps = messages['help'][operation]
1071 resps = messages['help'][operation]
1072 r = ui.promptchoice("%s %s" % (query, resps))
1072 r = ui.promptchoice("%s %s" % (query, resps))
1073 ui.write("\n")
1073 ui.write("\n")
1074 if r == 8: # ?
1074 if r == 8: # ?
1075 for c, t in ui.extractchoices(resps)[1]:
1075 for c, t in ui.extractchoices(resps)[1]:
1076 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1076 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1077 continue
1077 continue
1078 elif r == 0: # yes
1078 elif r == 0: # yes
1079 ret = True
1079 ret = True
1080 elif r == 1: # no
1080 elif r == 1: # no
1081 ret = False
1081 ret = False
1082 elif r == 2: # Edit patch
1082 elif r == 2: # Edit patch
1083 if chunk is None:
1083 if chunk is None:
1084 ui.write(_('cannot edit patch for whole file'))
1084 ui.write(_('cannot edit patch for whole file'))
1085 ui.write("\n")
1085 ui.write("\n")
1086 continue
1086 continue
1087 if chunk.header.binary():
1087 if chunk.header.binary():
1088 ui.write(_('cannot edit patch for binary file'))
1088 ui.write(_('cannot edit patch for binary file'))
1089 ui.write("\n")
1089 ui.write("\n")
1090 continue
1090 continue
1091 # Patch comment based on the Git one (based on comment at end of
1091 # Patch comment based on the Git one (based on comment at end of
1092 # https://mercurial-scm.org/wiki/RecordExtension)
1092 # https://mercurial-scm.org/wiki/RecordExtension)
1093 phelp = '---' + _("""
1093 phelp = '---' + _("""
1094 To remove '-' lines, make them ' ' lines (context).
1094 To remove '-' lines, make them ' ' lines (context).
1095 To remove '+' lines, delete them.
1095 To remove '+' lines, delete them.
1096 Lines starting with # will be removed from the patch.
1096 Lines starting with # will be removed from the patch.
1097
1097
1098 If the patch applies cleanly, the edited hunk will immediately be
1098 If the patch applies cleanly, the edited hunk will immediately be
1099 added to the record list. If it does not apply cleanly, a rejects
1099 added to the record list. If it does not apply cleanly, a rejects
1100 file will be generated: you can use that when you try again. If
1100 file will be generated: you can use that when you try again. If
1101 all lines of the hunk are removed, then the edit is aborted and
1101 all lines of the hunk are removed, then the edit is aborted and
1102 the hunk is left unchanged.
1102 the hunk is left unchanged.
1103 """)
1103 """)
1104 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1104 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1105 suffix=".diff")
1105 suffix=".diff")
1106 ncpatchfp = None
1106 ncpatchfp = None
1107 try:
1107 try:
1108 # Write the initial patch
1108 # Write the initial patch
1109 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1109 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1110 chunk.header.write(f)
1110 chunk.header.write(f)
1111 chunk.write(f)
1111 chunk.write(f)
1112 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1112 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1113 f.close()
1113 f.close()
1114 # Start the editor and wait for it to complete
1114 # Start the editor and wait for it to complete
1115 editor = ui.geteditor()
1115 editor = ui.geteditor()
1116 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1116 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1117 environ={'HGUSER': ui.username()},
1117 environ={'HGUSER': ui.username()},
1118 blockedtag='filterpatch')
1118 blockedtag='filterpatch')
1119 if ret != 0:
1119 if ret != 0:
1120 ui.warn(_("editor exited with exit code %d\n") % ret)
1120 ui.warn(_("editor exited with exit code %d\n") % ret)
1121 continue
1121 continue
1122 # Remove comment lines
1122 # Remove comment lines
1123 patchfp = open(patchfn)
1123 patchfp = open(patchfn, r'rb')
1124 ncpatchfp = stringio()
1124 ncpatchfp = stringio()
1125 for line in util.iterfile(patchfp):
1125 for line in util.iterfile(patchfp):
1126 line = util.fromnativeeol(line)
1126 if not line.startswith('#'):
1127 if not line.startswith('#'):
1127 ncpatchfp.write(line)
1128 ncpatchfp.write(line)
1128 patchfp.close()
1129 patchfp.close()
1129 ncpatchfp.seek(0)
1130 ncpatchfp.seek(0)
1130 newpatches = parsepatch(ncpatchfp)
1131 newpatches = parsepatch(ncpatchfp)
1131 finally:
1132 finally:
1132 os.unlink(patchfn)
1133 os.unlink(patchfn)
1133 del ncpatchfp
1134 del ncpatchfp
1134 # Signal that the chunk shouldn't be applied as-is, but
1135 # Signal that the chunk shouldn't be applied as-is, but
1135 # provide the new patch to be used instead.
1136 # provide the new patch to be used instead.
1136 ret = False
1137 ret = False
1137 elif r == 3: # Skip
1138 elif r == 3: # Skip
1138 ret = skipfile = False
1139 ret = skipfile = False
1139 elif r == 4: # file (Record remaining)
1140 elif r == 4: # file (Record remaining)
1140 ret = skipfile = True
1141 ret = skipfile = True
1141 elif r == 5: # done, skip remaining
1142 elif r == 5: # done, skip remaining
1142 ret = skipall = False
1143 ret = skipall = False
1143 elif r == 6: # all
1144 elif r == 6: # all
1144 ret = skipall = True
1145 ret = skipall = True
1145 elif r == 7: # quit
1146 elif r == 7: # quit
1146 raise error.Abort(_('user quit'))
1147 raise error.Abort(_('user quit'))
1147 return ret, skipfile, skipall, newpatches
1148 return ret, skipfile, skipall, newpatches
1148
1149
1149 seen = set()
1150 seen = set()
1150 applied = {} # 'filename' -> [] of chunks
1151 applied = {} # 'filename' -> [] of chunks
1151 skipfile, skipall = None, None
1152 skipfile, skipall = None, None
1152 pos, total = 1, sum(len(h.hunks) for h in headers)
1153 pos, total = 1, sum(len(h.hunks) for h in headers)
1153 for h in headers:
1154 for h in headers:
1154 pos += len(h.hunks)
1155 pos += len(h.hunks)
1155 skipfile = None
1156 skipfile = None
1156 fixoffset = 0
1157 fixoffset = 0
1157 hdr = ''.join(h.header)
1158 hdr = ''.join(h.header)
1158 if hdr in seen:
1159 if hdr in seen:
1159 continue
1160 continue
1160 seen.add(hdr)
1161 seen.add(hdr)
1161 if skipall is None:
1162 if skipall is None:
1162 h.pretty(ui)
1163 h.pretty(ui)
1163 msg = (_('examine changes to %s?') %
1164 msg = (_('examine changes to %s?') %
1164 _(' and ').join("'%s'" % f for f in h.files()))
1165 _(' and ').join("'%s'" % f for f in h.files()))
1165 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1166 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1166 if not r:
1167 if not r:
1167 continue
1168 continue
1168 applied[h.filename()] = [h]
1169 applied[h.filename()] = [h]
1169 if h.allhunks():
1170 if h.allhunks():
1170 applied[h.filename()] += h.hunks
1171 applied[h.filename()] += h.hunks
1171 continue
1172 continue
1172 for i, chunk in enumerate(h.hunks):
1173 for i, chunk in enumerate(h.hunks):
1173 if skipfile is None and skipall is None:
1174 if skipfile is None and skipall is None:
1174 chunk.pretty(ui)
1175 chunk.pretty(ui)
1175 if total == 1:
1176 if total == 1:
1176 msg = messages['single'][operation] % chunk.filename()
1177 msg = messages['single'][operation] % chunk.filename()
1177 else:
1178 else:
1178 idx = pos - len(h.hunks) + i
1179 idx = pos - len(h.hunks) + i
1179 msg = messages['multiple'][operation] % (idx, total,
1180 msg = messages['multiple'][operation] % (idx, total,
1180 chunk.filename())
1181 chunk.filename())
1181 r, skipfile, skipall, newpatches = prompt(skipfile,
1182 r, skipfile, skipall, newpatches = prompt(skipfile,
1182 skipall, msg, chunk)
1183 skipall, msg, chunk)
1183 if r:
1184 if r:
1184 if fixoffset:
1185 if fixoffset:
1185 chunk = copy.copy(chunk)
1186 chunk = copy.copy(chunk)
1186 chunk.toline += fixoffset
1187 chunk.toline += fixoffset
1187 applied[chunk.filename()].append(chunk)
1188 applied[chunk.filename()].append(chunk)
1188 elif newpatches is not None:
1189 elif newpatches is not None:
1189 for newpatch in newpatches:
1190 for newpatch in newpatches:
1190 for newhunk in newpatch.hunks:
1191 for newhunk in newpatch.hunks:
1191 if fixoffset:
1192 if fixoffset:
1192 newhunk.toline += fixoffset
1193 newhunk.toline += fixoffset
1193 applied[newhunk.filename()].append(newhunk)
1194 applied[newhunk.filename()].append(newhunk)
1194 else:
1195 else:
1195 fixoffset += chunk.removed - chunk.added
1196 fixoffset += chunk.removed - chunk.added
1196 return (sum([h for h in applied.itervalues()
1197 return (sum([h for h in applied.itervalues()
1197 if h[0].special() or len(h) > 1], []), {})
1198 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk of a text patch, in unified or context format.

    Parsing fills self.a (old-side lines, each prefixed '-' or ' ') and
    self.b (new-side lines, unprefixed), keeps the raw hunk lines in
    self.hunk, and records the range header fields starta/lena and
    startb/lenb.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the range header line ('@@ ...' or context-diff '*** ...')
        # num: 1-based hunk number, only used in error messages
        # lr: line reader the hunk body is consumed from; None builds an
        #     empty shell that the caller fills in (see getnormalized)
        # context: truthy when the input is a context diff
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # Rewrite CRLF endings to bare LF, leaving other lines alone.
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body from *lr* into a/b/hunk.

        Raises PatchError when self.desc is not a valid '@@' range line.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # A missing length in '@@ -s +s @@' means a single line.
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk and rebuild it in unified form.

        Reads the old block ('*** s,e ****') then the new block
        ('--- s,e ----'), merging the two into self.hunk and finally
        rewriting self.desc as a unified '@@' range line.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        # A non-zero start means the range is inclusive of its first line.
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the EOL we appended.
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # Index into self.hunk while interleaving new-side lines with the
        # already-collected old-side lines (position 0 is the desc line).
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message text says 'old' although this loop
                # reads the new block; matches the old-side loop above.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # Advance through self.hunk past '-' lines until we either find
            # the matching merged line or the insertion point for this one.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Handle a trailing '\\ No newline at end of file' marker, if any;
        otherwise push the peeked line back onto the reader."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides collected exactly the advertised line counts.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                # Count trailing context lines the same way, from the bottom.
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to *fuzz* context
        lines trimmed from the hunk edges (top only when *toponly*)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1414
1415
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # Decoded payload, set by _read(); None until parsing succeeds.
        self.text = None
        # True when the payload is a delta against the old file contents.
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a one-element list.

        For delta hunks, *lines* is the old content the delta is applied to.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Consume a git binary hunk from *lr* and store the decoded text.

        Raises PatchError on truncated input, undecodable base85 data, or a
        size mismatch against the advertised length.
        """
        def getline(lr, hunk):
            # Read one raw line, keep it in *hunk*, return it sans EOL.
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # Scan for the 'literal <size>' or 'delta <size>' header line.
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # First character encodes the decoded byte count of this line:
            # 'A'-'Z' -> 1..26, otherwise 'a'-'z' -> 27..52.
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, util.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1470
1471
def parsefilename(str):
    """Extract the file name from a '--- name<tab-or-space>stuff' diff line."""
    # Drop the 4-character '--- '/'+++ ' marker and any trailing EOL chars.
    name = str[4:].rstrip('\r\n')
    # A tab terminates the name; failing that, fall back to the first space.
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1480
1481
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''
    # Hunks know how to build their own reverse; headers (and anything else
    # without a reversehunk() method) pass through unchanged.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1543
1544
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # Current old/new line cursors within the file being parsed.
            self.fromline = 0
            self.toline = 0
            # Section name from the range line, if any.
            self.proc = ''
            # Header currently being filled with hunks.
            self.header = None
            # Context lines seen since the last hunk.
            self.context = []
            # Context lines preceding the pending hunk.
            self.before = []
            # Changed lines of the pending hunk.
            self.hunk = []
            # All completed headers, returned by finished().
            self.headers = []

        def addrange(self, limits):
            """Record the '@@' range line: reset line cursors and proc."""
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            """Flush the pending hunk (if any) using *context* as its
            trailing context, then remember *context* for the next hunk."""
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                # Advance cursors past the leading context and this hunk.
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            """Start a new hunk; previously seen context becomes its
            leading context."""
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            """Flush any pending hunk and open a new file header."""
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            """Flush the last pending hunk and return all headers."""
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}: which scanpatch() events are legal
        # in each state and how they are processed.
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    # Concatenate the chunks into one seekable buffer for scanpatch().
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # Handlers are unbound, so the parser instance is passed in.
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1667
1668
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    # Nothing to strip: just prepend the prefix (trailing blanks dropped).
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # A run of slashes ('a//b') counts as a single separator.
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1705
1706
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """return a patchmeta for the file targeted by a plain (non-git) hunk

    ``afile_orig``/``bfile_orig`` are the raw '---'/'+++' names from the
    patch; ``strip`` and ``prefix`` are applied through pathtransform().
    ``gp.op`` is set to 'ADD' or 'DELETE' for created/removed files.
    Raises PatchError when neither side names a usable file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # creation/removal is signalled by a null name plus a zero start/len
    # hunk range on that side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    # neither side exists and it is not a creation: target is missing
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer whichever side actually exists in the backend
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch's own naming when existence checks failed
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1760
1761
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    # unified diff hunk header: '@@ -start,len +start,len @@ [section]'
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not part of this run: give the line back for the caller
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            # file header: collect everything up to the '---'/'diff' marker
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # context lines (and '\ No newline ...' markers)
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            # added/removed lines forming a hunk body
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1806
1807
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    startpos = 0
    try:
        startpos = lr.fp.tell()
        scanfp = lr.fp
    except IOError:
        # stream is not seekable: buffer it entirely so we can rewind
        scanfp = stringio(lr.fp.read())
    scanner = linereader(scanfp)
    scanner.push(firstline)
    patches = readgitpatch(scanner)
    # rewind so the caller can re-read the patch body from the start
    scanfp.seek(startpos)
    return patches
1832
1833
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # pending git metadata (reversed list), popped as files are emitted
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until the diff style is known, then True for
    # context diffs and False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk ('@@', context '****...' or git binary) for the
            # previously announced file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before the hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # arm the next hunk to emit a 'file' event first
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # leftover git metadata entries with no hunks of their own
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1928
1929
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    ``binchunk`` is the delta stream (two varint sizes, then a sequence of
    copy/insert opcodes); ``data`` is the source the copy opcodes read from.
    Returns the reconstructed target. Raises PatchError on a zero opcode.
    """
    def deltahead(binchunk):
        # skip one base-128 varint (high bit set on continuation bytes)
        # and return the number of bytes it occupied
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # accumulate fragments and join once: repeated 'out +=' is quadratic
    out = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]    # skip source size
    s = deltahead(binchunk)
    binchunk = binchunk[s:]    # skip target size
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy opcode: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # an encoded size of zero means 0x10000 bytes
                size = 0x10000
            offset_end = offset + size
            out.append(data[offset:offset_end])
        elif cmd != 0:
            # insert opcode: the next 'cmd' bytes are literal data
            offset_end = i + cmd
            out.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(out)
1984
1985
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper: _applydiff does the work, instantiating the default
    # patchfile class for each patched file
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1997
1998
1998 def _canonprefix(repo, prefix):
1999 def _canonprefix(repo, prefix):
1999 if prefix:
2000 if prefix:
2000 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2001 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2001 if prefix != '':
2002 if prefix != '':
2002 prefix += '/'
2003 prefix += '/'
2003 return prefix
2004 return prefix
2004
2005
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """drive the hunk events from iterhunks(fp) through backend/store

    ``patcher`` is the factory (e.g. patchfile) instantiated for every
    file that has hunks. Returns -1 if any file collected rejects, else
    1 if some hunk applied with a nonzero status, else 0.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip-1: iterhunks paths keep their 'a/'/'b/' leading component
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # starting a new file: close the previous one first
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (mode, copy, rename, delete...)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the failure and keep applying the other files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any
            # hunks touch them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2089
2090
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    # feed the patch file through the external program via shell redirection
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                # remember the current file to label later fuzz/FAILED output
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): pf/printed_file are only bound once a
                # 'patching file' line was seen; output starting with a
                # fuzz/FAILED line would raise UnboundLocalError — confirm
                # the external patch tool always prints the file first
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # refresh dirstate for everything the external tool touched
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2131
2132
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """apply ``patchobj`` (a path or a file-like object) through ``backend``

    Raises PatchError when the patch failed to apply; otherwise returns
    True if applydiff reported fuzz (a positive status).
    """
    files = set() if files is None else files
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a filename: assume it is already an open file object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            # only close what we opened ourselves
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2158
2159
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # wire the working-directory backend straight into the generic driver
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2165
2166
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """like internalpatch(), but applies through a repository backend"""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2170
2171
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external patch program takes precedence
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip, files,
                          similarity)
2192
2193
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """return the set of repository paths touched by the patch at patchpath

    Renames contribute both the new and the old name. 'hunk' and 'git'
    parser events are ignored; any other state aborts.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # strip-1: git metadata paths keep their 'a/'/'b/' part
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2215
2216
class GitDiffRequired(Exception):
    """signals that a git-style diff is required"""
2218
2219
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    # turn on every optional feature group of difffeatureopts
    allfeatures = {'git': True, 'whitespace': True, 'formatchanging': True}
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           **allfeatures)
2223
2224
2224 diffopts = diffallopts
2225 diffopts = diffallopts
2225
2226
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    '''
    def fetch(key, name=None, getter=ui.configbool, forceplain=None):
        # Explicit command line values win over config.  diffopts flags are
        # either None-default (passed through unchanged so unset values can
        # be identified) or some other falsey default (eg --unified, which
        # defaults to an empty string); only a truthy value or a genuine
        # bool (True/False) counts as "set on the command line".
        if opts:
            v = opts.get(key)
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    built = {
        'nodates': fetch('nodates'),
        'showfunc': fetch('show_function', 'showfunc'),
        'context': fetch('unified', getter=ui.config),
    }
    built['worddiff'] = ui.configbool('experimental', 'worddiff')
    built['xdiff'] = ui.configbool('experimental', 'xdiff')

    if git:
        built['git'] = fetch('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        built['showsimilarity'] = ui.configbool('experimental',
                                                'extendedheader.similarity')

        # need to inspect the ui object instead of using fetch() since we
        # want to test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash)
                # or a word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # not an integer: map the known keywords to hash lengths
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the outcome, even when hlen stayed None
                built['index'] = hlen

    if whitespace:
        built['ignorews'] = fetch('ignore_all_space', 'ignorews')
        built['ignorewsamount'] = fetch('ignore_space_change',
                                        'ignorewsamount')
        built['ignoreblanklines'] = fetch('ignore_blank_lines',
                                          'ignoreblanklines')
        built['ignorewseol'] = fetch('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        built['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        built['nobinary'] = (not binary if binary is not None
                             else fetch('nobinary', forceplain=False))
        built['noprefix'] = fetch('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(built))
2308
2309
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
        repo, node1=node1, node2=node2,
        match=match, changes=changes, opts=opts,
        losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten every hunk's lines into one text blob for this file
        pieces = []
        for hunkrange, hunklines in hunks:
            pieces.extend(hunklines)
        text = ''.join(pieces)
        # a lone header line with no hunk text is an empty diff; skip it
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2355
2356
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def makefilectxgetter():
        # small LRU cache of filelogs keyed by path, bounded at 20 entries
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = makefilectxgetter()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        fmtnode = hex
    else:
        fmtnode = short
    revs = [fmtnode(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case
            # we need to 'and' all the patterns from the matcher with
            # relroot.
            def inrelroot(l):
                return [f for f in l if f.startswith(relroot)]
            modified = inrelroot(modified)
            added = inrelroot(added)
            removed = inrelroot(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as
            # removed.  They are not in ctx1, so don't show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies; show them in the diff as additions instead.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2468
2469
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    inlinecolor = False
    if kw.get(r'opts'):
        inlinecolor = kw[r'opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linepairs = {}
        if inlinecolor:
            linepairs = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # track whether we are inside a file header: a header starts at
            # any line not beginning with ' +-@\' and ends at the next hunk
            if inheader:
                if line.startswith('@'):
                    inheader = False
            else:
                if line and line[0] not in ' +-@\\':
                    inheader = True
            stripped = line
            ischange = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripped = line.rstrip()
                ischange = True

            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if not stripped.startswith(prefix):
                    continue
                if ischange:
                    if i in linepairs:
                        # word-level highlighting against the paired line
                        for t, l in _inlinediff(lines[i].rstrip(),
                                                lines[linepairs[i]].rstrip(),
                                                label):
                            yield (t, l)
                    else:
                        for token in tabsplitter.findall(stripped):
                            if token.startswith('\t'):
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                else:
                    yield (stripped, label)
                break
            else:
                yield (line, '')
            if line != stripped:
                yield (line[len(stripped):], 'diff.trailingwhitespace')
2534
2535
2535 def _findmatches(slist):
2536 def _findmatches(slist):
2536 '''Look for insertion matches to deletion and returns a dict of
2537 '''Look for insertion matches to deletion and returns a dict of
2537 correspondences.
2538 correspondences.
2538 '''
2539 '''
2539 lastmatch = 0
2540 lastmatch = 0
2540 matches = {}
2541 matches = {}
2541 for i, line in enumerate(slist):
2542 for i, line in enumerate(slist):
2542 if line == '':
2543 if line == '':
2543 continue
2544 continue
2544 if line[0] == '-':
2545 if line[0] == '-':
2545 lastmatch = max(lastmatch, i)
2546 lastmatch = max(lastmatch, i)
2546 newgroup = False
2547 newgroup = False
2547 for j, newline in enumerate(slist[lastmatch + 1:]):
2548 for j, newline in enumerate(slist[lastmatch + 1:]):
2548 if newline == '':
2549 if newline == '':
2549 continue
2550 continue
2550 if newline[0] == '-' and newgroup: # too far, no match
2551 if newline[0] == '-' and newgroup: # too far, no match
2551 break
2552 break
2552 if newline[0] == '+': # potential match
2553 if newline[0] == '+': # potential match
2553 newgroup = True
2554 newgroup = True
2554 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2555 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2555 if sim > 0.7:
2556 if sim > 0.7:
2556 lastmatch = lastmatch + 1 + j
2557 lastmatch = lastmatch + 1 + j
2557 matches[i] = lastmatch
2558 matches[i] = lastmatch
2558 matches[lastmatch] = i
2559 matches[lastmatch] = i
2559 break
2560 break
2560 return matches
2561 return matches
2561
2562
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.'''
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    # we never want to higlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        sign = '-'
    elif operation == 'diff.inserted' and s1.startswith('+'):
        sign = '+'
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)
    label = operation
    token = sign
    s2 = s2[1:]
    s1 = s1[1:]

    out = []
    delta = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
    for piece in delta:
        # skip the other side's lines and empty tokens ('? ' hints, '+ '/'- ')
        if piece[0] in operation_skip or len(piece) == 2:
            continue
        # label priority: tab > unchanged > highlighted change
        if piece[2:] == '\t':
            newlabel = 'diff.tab'
        elif piece[0] == ' ':
            newlabel = operation
        else:
            newlabel = operation + '.highlight'
        if newlabel == label:  # contiguous token with same label
            token += piece[2:]
            continue
        out.append((token, label))
        label = newlabel
        token = piece[2:]
    out.append((token, label))

    return out
2603
2604
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate every argument straight through to difflabel() wrapping diff()
    return difflabel(diff, *args, **kw)
2607
2608
2608 def _filepairs(modified, added, removed, copy, opts):
2609 def _filepairs(modified, added, removed, copy, opts):
2609 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2610 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2610 before and f2 is the the name after. For added files, f1 will be None,
2611 before and f2 is the the name after. For added files, f1 will be None,
2611 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2612 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2612 or 'rename' (the latter two only if opts.git is set).'''
2613 or 'rename' (the latter two only if opts.git is set).'''
2613 gone = set()
2614 gone = set()
2614
2615
2615 copyto = dict([(v, k) for k, v in copy.items()])
2616 copyto = dict([(v, k) for k, v in copy.items()])
2616
2617
2617 addedset, removedset = set(added), set(removed)
2618 addedset, removedset = set(added), set(removed)
2618
2619
2619 for f in sorted(modified + added + removed):
2620 for f in sorted(modified + added + removed):
2620 copyop = None
2621 copyop = None
2621 f1, f2 = f, f
2622 f1, f2 = f, f
2622 if f in addedset:
2623 if f in addedset:
2623 f1 = None
2624 f1 = None
2624 if f in copy:
2625 if f in copy:
2625 if opts.git:
2626 if opts.git:
2626 f1 = copy[f]
2627 f1 = copy[f]
2627 if f1 in removedset and f1 not in gone:
2628 if f1 in removedset and f1 not in gone:
2628 copyop = 'rename'
2629 copyop = 'rename'
2629 gone.add(f1)
2630 gone.add(f1)
2630 else:
2631 else:
2631 copyop = 'copy'
2632 copyop = 'copy'
2632 elif f in removedset:
2633 elif f in removedset:
2633 f2 = None
2634 f2 = None
2634 if opts.git:
2635 if opts.git:
2635 # have we already reported a copy above?
2636 # have we already reported a copy above?
2636 if (f in copyto and copyto[f] in addedset
2637 if (f in copyto and copyto[f] in addedset
2637 and copy[copyto[f]] == f):
2638 and copy[copyto[f]] == f):
2638 continue
2639 continue
2639 yield f1, f2, copyop
2640 yield f1, f2, copyop
2640
2641
2641 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2642 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2642 copy, getfilectx, opts, losedatafn, prefix, relroot):
2643 copy, getfilectx, opts, losedatafn, prefix, relroot):
2643 '''given input data, generate a diff and yield it in blocks
2644 '''given input data, generate a diff and yield it in blocks
2644
2645
2645 If generating a diff would lose data like flags or binary data and
2646 If generating a diff would lose data like flags or binary data and
2646 losedatafn is not None, it will be called.
2647 losedatafn is not None, it will be called.
2647
2648
2648 relroot is removed and prefix is added to every path in the diff output.
2649 relroot is removed and prefix is added to every path in the diff output.
2649
2650
2650 If relroot is not empty, this function expects every path in modified,
2651 If relroot is not empty, this function expects every path in modified,
2651 added, removed and copy to start with it.'''
2652 added, removed and copy to start with it.'''
2652
2653
2653 def gitindex(text):
2654 def gitindex(text):
2654 if not text:
2655 if not text:
2655 text = ""
2656 text = ""
2656 l = len(text)
2657 l = len(text)
2657 s = hashlib.sha1('blob %d\0' % l)
2658 s = hashlib.sha1('blob %d\0' % l)
2658 s.update(text)
2659 s.update(text)
2659 return hex(s.digest())
2660 return hex(s.digest())
2660
2661
2661 if opts.noprefix:
2662 if opts.noprefix:
2662 aprefix = bprefix = ''
2663 aprefix = bprefix = ''
2663 else:
2664 else:
2664 aprefix = 'a/'
2665 aprefix = 'a/'
2665 bprefix = 'b/'
2666 bprefix = 'b/'
2666
2667
2667 def diffline(f, revs):
2668 def diffline(f, revs):
2668 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2669 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2669 return 'diff %s %s' % (revinfo, f)
2670 return 'diff %s %s' % (revinfo, f)
2670
2671
2671 def isempty(fctx):
2672 def isempty(fctx):
2672 return fctx is None or fctx.size() == 0
2673 return fctx is None or fctx.size() == 0
2673
2674
2674 date1 = dateutil.datestr(ctx1.date())
2675 date1 = dateutil.datestr(ctx1.date())
2675 date2 = dateutil.datestr(ctx2.date())
2676 date2 = dateutil.datestr(ctx2.date())
2676
2677
2677 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2678 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2678
2679
2679 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2680 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2680 or repo.ui.configbool('devel', 'check-relroot')):
2681 or repo.ui.configbool('devel', 'check-relroot')):
2681 for f in modified + added + removed + list(copy) + list(copy.values()):
2682 for f in modified + added + removed + list(copy) + list(copy.values()):
2682 if f is not None and not f.startswith(relroot):
2683 if f is not None and not f.startswith(relroot):
2683 raise AssertionError(
2684 raise AssertionError(
2684 "file %s doesn't start with relroot %s" % (f, relroot))
2685 "file %s doesn't start with relroot %s" % (f, relroot))
2685
2686
2686 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2687 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2687 content1 = None
2688 content1 = None
2688 content2 = None
2689 content2 = None
2689 fctx1 = None
2690 fctx1 = None
2690 fctx2 = None
2691 fctx2 = None
2691 flag1 = None
2692 flag1 = None
2692 flag2 = None
2693 flag2 = None
2693 if f1:
2694 if f1:
2694 fctx1 = getfilectx(f1, ctx1)
2695 fctx1 = getfilectx(f1, ctx1)
2695 if opts.git or losedatafn:
2696 if opts.git or losedatafn:
2696 flag1 = ctx1.flags(f1)
2697 flag1 = ctx1.flags(f1)
2697 if f2:
2698 if f2:
2698 fctx2 = getfilectx(f2, ctx2)
2699 fctx2 = getfilectx(f2, ctx2)
2699 if opts.git or losedatafn:
2700 if opts.git or losedatafn:
2700 flag2 = ctx2.flags(f2)
2701 flag2 = ctx2.flags(f2)
2701 # if binary is True, output "summary" or "base85", but not "text diff"
2702 # if binary is True, output "summary" or "base85", but not "text diff"
2702 if opts.text:
2703 if opts.text:
2703 binary = False
2704 binary = False
2704 else:
2705 else:
2705 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2706 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2706
2707
2707 if losedatafn and not opts.git:
2708 if losedatafn and not opts.git:
2708 if (binary or
2709 if (binary or
2709 # copy/rename
2710 # copy/rename
2710 f2 in copy or
2711 f2 in copy or
2711 # empty file creation
2712 # empty file creation
2712 (not f1 and isempty(fctx2)) or
2713 (not f1 and isempty(fctx2)) or
2713 # empty file deletion
2714 # empty file deletion
2714 (isempty(fctx1) and not f2) or
2715 (isempty(fctx1) and not f2) or
2715 # create with flags
2716 # create with flags
2716 (not f1 and flag2) or
2717 (not f1 and flag2) or
2717 # change flags
2718 # change flags
2718 (f1 and f2 and flag1 != flag2)):
2719 (f1 and f2 and flag1 != flag2)):
2719 losedatafn(f2 or f1)
2720 losedatafn(f2 or f1)
2720
2721
2721 path1 = f1 or f2
2722 path1 = f1 or f2
2722 path2 = f2 or f1
2723 path2 = f2 or f1
2723 path1 = posixpath.join(prefix, path1[len(relroot):])
2724 path1 = posixpath.join(prefix, path1[len(relroot):])
2724 path2 = posixpath.join(prefix, path2[len(relroot):])
2725 path2 = posixpath.join(prefix, path2[len(relroot):])
2725 header = []
2726 header = []
2726 if opts.git:
2727 if opts.git:
2727 header.append('diff --git %s%s %s%s' %
2728 header.append('diff --git %s%s %s%s' %
2728 (aprefix, path1, bprefix, path2))
2729 (aprefix, path1, bprefix, path2))
2729 if not f1: # added
2730 if not f1: # added
2730 header.append('new file mode %s' % gitmode[flag2])
2731 header.append('new file mode %s' % gitmode[flag2])
2731 elif not f2: # removed
2732 elif not f2: # removed
2732 header.append('deleted file mode %s' % gitmode[flag1])
2733 header.append('deleted file mode %s' % gitmode[flag1])
2733 else: # modified/copied/renamed
2734 else: # modified/copied/renamed
2734 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2735 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2735 if mode1 != mode2:
2736 if mode1 != mode2:
2736 header.append('old mode %s' % mode1)
2737 header.append('old mode %s' % mode1)
2737 header.append('new mode %s' % mode2)
2738 header.append('new mode %s' % mode2)
2738 if copyop is not None:
2739 if copyop is not None:
2739 if opts.showsimilarity:
2740 if opts.showsimilarity:
2740 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2741 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2741 header.append('similarity index %d%%' % sim)
2742 header.append('similarity index %d%%' % sim)
2742 header.append('%s from %s' % (copyop, path1))
2743 header.append('%s from %s' % (copyop, path1))
2743 header.append('%s to %s' % (copyop, path2))
2744 header.append('%s to %s' % (copyop, path2))
2744 elif revs and not repo.ui.quiet:
2745 elif revs and not repo.ui.quiet:
2745 header.append(diffline(path1, revs))
2746 header.append(diffline(path1, revs))
2746
2747
2747 # fctx.is | diffopts | what to | is fctx.data()
2748 # fctx.is | diffopts | what to | is fctx.data()
2748 # binary() | text nobinary git index | output? | outputted?
2749 # binary() | text nobinary git index | output? | outputted?
2749 # ------------------------------------|----------------------------
2750 # ------------------------------------|----------------------------
2750 # yes | no no no * | summary | no
2751 # yes | no no no * | summary | no
2751 # yes | no no yes * | base85 | yes
2752 # yes | no no yes * | base85 | yes
2752 # yes | no yes no * | summary | no
2753 # yes | no yes no * | summary | no
2753 # yes | no yes yes 0 | summary | no
2754 # yes | no yes yes 0 | summary | no
2754 # yes | no yes yes >0 | summary | semi [1]
2755 # yes | no yes yes >0 | summary | semi [1]
2755 # yes | yes * * * | text diff | yes
2756 # yes | yes * * * | text diff | yes
2756 # no | * * * * | text diff | yes
2757 # no | * * * * | text diff | yes
2757 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2758 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2758 if binary and (not opts.git or (opts.git and opts.nobinary and not
2759 if binary and (not opts.git or (opts.git and opts.nobinary and not
2759 opts.index)):
2760 opts.index)):
2760 # fast path: no binary content will be displayed, content1 and
2761 # fast path: no binary content will be displayed, content1 and
2761 # content2 are only used for equivalent test. cmp() could have a
2762 # content2 are only used for equivalent test. cmp() could have a
2762 # fast path.
2763 # fast path.
2763 if fctx1 is not None:
2764 if fctx1 is not None:
2764 content1 = b'\0'
2765 content1 = b'\0'
2765 if fctx2 is not None:
2766 if fctx2 is not None:
2766 if fctx1 is not None and not fctx1.cmp(fctx2):
2767 if fctx1 is not None and not fctx1.cmp(fctx2):
2767 content2 = b'\0' # not different
2768 content2 = b'\0' # not different
2768 else:
2769 else:
2769 content2 = b'\0\0'
2770 content2 = b'\0\0'
2770 else:
2771 else:
2771 # normal path: load contents
2772 # normal path: load contents
2772 if fctx1 is not None:
2773 if fctx1 is not None:
2773 content1 = fctx1.data()
2774 content1 = fctx1.data()
2774 if fctx2 is not None:
2775 if fctx2 is not None:
2775 content2 = fctx2.data()
2776 content2 = fctx2.data()
2776
2777
2777 if binary and opts.git and not opts.nobinary:
2778 if binary and opts.git and not opts.nobinary:
2778 text = mdiff.b85diff(content1, content2)
2779 text = mdiff.b85diff(content1, content2)
2779 if text:
2780 if text:
2780 header.append('index %s..%s' %
2781 header.append('index %s..%s' %
2781 (gitindex(content1), gitindex(content2)))
2782 (gitindex(content1), gitindex(content2)))
2782 hunks = (None, [text]),
2783 hunks = (None, [text]),
2783 else:
2784 else:
2784 if opts.git and opts.index > 0:
2785 if opts.git and opts.index > 0:
2785 flag = flag1
2786 flag = flag1
2786 if flag is None:
2787 if flag is None:
2787 flag = flag2
2788 flag = flag2
2788 header.append('index %s..%s %s' %
2789 header.append('index %s..%s %s' %
2789 (gitindex(content1)[0:opts.index],
2790 (gitindex(content1)[0:opts.index],
2790 gitindex(content2)[0:opts.index],
2791 gitindex(content2)[0:opts.index],
2791 gitmode[flag]))
2792 gitmode[flag]))
2792
2793
2793 uheaders, hunks = mdiff.unidiff(content1, date1,
2794 uheaders, hunks = mdiff.unidiff(content1, date1,
2794 content2, date2,
2795 content2, date2,
2795 path1, path2,
2796 path1, path2,
2796 binary=binary, opts=opts)
2797 binary=binary, opts=opts)
2797 header.extend(uheaders)
2798 header.extend(uheaders)
2798 yield fctx1, fctx2, header, hunks
2799 yield fctx1, fctx2, header, hunks
2799
2800
def diffstatsum(stats):
    '''Fold per-file diffstat tuples into file-set totals.

    ``stats`` yields (filename, adds, removes, isbinary) tuples; the result
    is (maxfile, maxtotal, addtotal, removetotal, binary) where maxfile is
    the widest filename (in display columns), maxtotal the largest per-file
    change count, and binary whether any file was binary.
    '''
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        # column width, not len(): filenames may contain wide characters
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        if adds + removes > maxtotal:
            maxtotal = adds + removes
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True
    return maxfile, maxtotal, addtotal, removetotal, binary
2810
2811
def diffstatdata(lines):
    '''Summarize a diff into per-file statistics.

    ``lines`` is an iterable of diff output lines. Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file in the diff.
    '''
    # raw string: a bare '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6, SyntaxWarning later)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker: header is over, start counting +/- lines
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2849
2850
def diffstat(lines, width=80):
    '''Render a diffstat histogram for ``lines``, fitted to ``width`` columns.

    Each changed file gets one row of '+'/'-' marks scaled to the available
    space, followed by a one-line summary of the whole diff.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # the count column must be able to hold the literal 'Bin'
        countwidth = 3
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(n):
        if maxtotal <= graphwidth:
            return n
        # When the graph would overflow, shrink it proportionally, but a
        # file that did change always gets at least one mark.
        return max(n * graphwidth // maxtotal, int(bool(n)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2887
2888
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # plain line (summary or padding): no highlighting
            yield (line, '')
        else:
            # split the graph off so the '+'/'-' runs can be labelled
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now