##// END OF EJS Templates
py3: handle keyword arguments correctly in patch.py...
Pulkit Goyal -
r35367:dce76155 default
parent child Browse files
Show More
@@ -1,2899 +1,2899 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import difflib
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import tempfile
21 import tempfile
22 import zlib
22 import zlib
23
23
24 from .i18n import _
24 from .i18n import _
25 from .node import (
25 from .node import (
26 hex,
26 hex,
27 short,
27 short,
28 )
28 )
29 from . import (
29 from . import (
30 copies,
30 copies,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 policy,
36 policy,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43
43
# C implementation of the hunk-matching helpers, loaded through the policy
# layer (which selects between the C extension and a pure-Python fallback).
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# Matches the "diff --git a/<old> b/<new>" header line of a git-style diff.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tabs (used when
# highlighting whitespace in diffs).
tabsplitter = re.compile(br'(\t+|[^\t]+)')

# Re-exported here so callers can catch patch failures without having to
# import the 'error' module themselves.
PatchError = error.PatchError

# public functions
53
def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        # does this line look like an RFC 2822 "Key: value" mail header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap a list of lines in a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers (output of 'hg export')
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox: every 'From ' line starts a new message, and each
        # message is recursively re-split as an individual patch
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # delegate to the email parser and yield each text-ish MIME part
        def msgfp(m):
            # flatten a message object back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on header-looking boundaries without a full MIME parser
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: treat the whole input as a single patch
        yield chunk(cur)

    class fiter(object):
        # adapt an object that only has readline() (e.g. an http
        # response) to the iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Read until we can tell what kind of container we are looking at,
    # then hand the stream (plus what was buffered in 'cur') to the
    # matching splitter above.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
182
182
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# extract() scans '# <header> <value>' lines of an hg patch and stores the
# value under the corresponding key in its result dictionary.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
189
189
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the diff payload is accumulated into a temp file whose name is
    # returned under 'filename' (caller unlinks it)
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]'-style tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is (candidate)
                # commit-message text, possibly with hg export headers
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible '# <Header> <value>' lines
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # '---' ends the commit message in mail patches
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff extends the message
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no diff found: nothing to apply, discard the temp file
        os.unlink(tmpname)
    return data
310
310
class patchmeta(object):
    """Metadata describing a single file touched by a git-style patch.

    Attributes:
      op      -- one of ADD, DELETE, RENAME, COPY or MODIFY (the default)
      path    -- path of the patched file
      oldpath -- origin path when op is COPY or RENAME, else None
      mode    -- (islink, isexec) pair when the file mode changed, where
                 islink/isexec are truthy for symlinks/executables; None
                 when the mode is untouched
      binary  -- True for a 'GIT binary patch' entry
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode an st_mode-style octal value into the (islink, isexec) pair
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # shallow duplicate of this metadata record
        other = patchmeta(self.path)
        for name in ('oldpath', 'mode', 'op', 'binary'):
            setattr(other, name, getattr(self, name))
        return other

    def _ispatchinga(self, afile):
        # does afile name the old ("a/") side of this entry?
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # does bfile name the new ("b/") side of this entry?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True when both sides of a hunk header match this entry
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
356
356
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per 'diff --git' section
    found in the line reader 'lr'.
    """

    # Filter patch for git information
    gp = None          # patchmeta of the section currently being parsed
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new section starts; flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # '---' marks the start of hunks: metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last six characters are the octal mode, e.g. '100644'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
400
400
class linereader(object):
    """File-object wrapper that lets callers un-read lines.

    Lines handed back through push() are buffered and served by later
    readline() calls before any more data is taken from the stream.
    """

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # pushing None is a convenience no-op
        if line is None:
            return
        self.buf.append(line)

    def readline(self):
        if self.buf:
            # serve pushed-back lines first, oldest first
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (end of stream)
        return iter(self.readline, '')
420
420
class abstractbackend(object):
    """Interface for destinations a patch can be applied to.

    Concrete subclasses decide where patched contents actually end up
    (working directory, in-memory store, ...).
    """

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which could not be applied and total the total number of hunks
        for this file. The default implementation discards the rejects.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
454
454
class fsbackend(abstractbackend):
    """Patch backend reading and writing files directly on the
    filesystem, rooted at 'basedir'."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # for symlinks the "data" is the link target itself
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file just means "not executable"; anything else
            # is a real error
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
        # (None, None) signals a missing/deleted file (see abstractbackend)
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change: leave contents untouched
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save hunks that failed to apply in a '.rej' file next to the
        # target, mirroring what GNU patch does
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
503
503
class workingbackend(fsbackend):
    """fsbackend subclass that also keeps the repository dirstate in
    sync with the files it patches (copies, removals, adds)."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold handed to marktouched() for rename guessing
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files that exist on disk but are unknown ('?')
        # to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # record everything we did in the dirstate and return the sorted
        # list of touched files
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
547
547
class filestore(object):
    """Store for patched file contents.

    Contents are kept in memory until their combined size exceeds
    'maxsize' bytes (default 4 MiB, unlimited when negative); overflow
    entries are written to a temporary directory on disk, which close()
    removes.
    """

    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created on-disk vfs for overflow
        self.files = {}      # fname -> (diskname, mode, copied)
        self.created = 0     # counter used to generate disk file names
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or self.size + len(data) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # in-memory budget exhausted: spill this entry to disk
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # short numeric names sidestep filename portability issues
        diskname = str(self.created)
        self.created += 1
        self.opener.write(diskname, data)
        self.files[fname] = (diskname, mode, copied)

    def getfile(self, fname):
        # memory first, then the spill directory
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            diskname, mode, copied = self.files[fname]
            return self.opener.read(diskname), mode, copied
        return None, None, None

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
584
584
class repobackend(abstractbackend):
    """Patch backend applying changes on top of a changectx, collecting
    the patched contents into a filestore instead of touching the
    working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx      # context the patch is applied against
        self.store = store  # filestore receiving the patched contents
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # (None, None) signals a missing file (see abstractbackend)
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: keep the existing contents
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # removal is only recorded; the store never sees the file
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
626
626
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns use regex escapes (\d, \+, \*) that are not
# valid *string* escapes; without the r prefix Python 3.6+ emits
# DeprecationWarning (later SyntaxWarning). The compiled patterns are
# byte-for-byte identical to before.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Line-ending handling modes accepted by the patching code below.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
631
631
class patchfile(object):
    """In-memory patching state for a single target file.

    Loads the file's current lines through *backend* (or, for a
    copy/rename, through *store*), applies hunks to them via apply(),
    and flushes the result plus any rejects in close().

    eolmode controls line-ending handling: 'strict' preserves input,
    'crlf'/'lf' force an ending, 'auto' follows the target file.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a parsed git-style patch header: path, mode, oldpath, op.
        self.fname = gp.path
        self.eolmode = eolmode
        # Detected line ending of the existing file ('\r\n', '\n' or None).
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        # Assume missing until content is actually found below.
        self.missing = True
        self.mode = gp.mode
        # oldpath is set for copies/renames and names the content source.
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # Copy/rename: the source content comes from the store.
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Work on LF-normalized lines; writelines() restores
                    # the requested ending on output.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                # File does not exist yet, but the patch creates it.
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        # Per-apply scratch state: line index, hunk bookkeeping, rejects.
        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Join *lines*, reapplying the chosen EOL, and write via backend."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Convert the LF-normalized working lines back to target EOL.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Announce which file is being patched, at most once per file."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    # NOTE(review): '\ ' below is not a valid string escape
                    # (Python keeps the backslash literally but py3.6+ warns);
                    # consider '\\ ' — verify the emitted marker first.
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk to the in-memory lines.

        Returns 0 on a clean apply, the fuzz amount when the hunk only
        applied fuzzily, and -1 when the hunk was rejected (it is then
        queued on self.rej for write_rej()).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace the whole content; no fuzzing possible.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # NOTE(review): xrange is Python-2-only; py3 support is presumably
        # handled elsewhere (pycompat) — verify before running under py3.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        # Remember how far off we were for the next hunk.
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        # Reject the *original* (un-normalized) hunk so the .rej matches input.
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush modified lines and any rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
848
848
class header(object):
    """A patch header together with the hunks that follow it."""

    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when the header marks binary (git 'index') content."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a short human-oriented summary of this header to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum([max(h.added, h.removed) for h in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        """Write the raw header lines to fp."""
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when hunks cannot be selected individually (binary/delete)."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the path(s) this header refers to: [path] or [from, to]."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            # Non-git header: the path is the last group of the diff line.
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        """Return the destination path."""
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        """True when the header creates a new file."""
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level: the user takes the whole operation (e.g. a deletion)
        # or leaves it; partial selection is not possible. A newly added
        # file is special only while empty -- once it has content the
        # content itself is selectable.
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
920
920
class recordhunk(object):
    """One hunk of a patch together with its surrounding context lines.

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Clamp context to at most maxcontext lines; return the number
            # of dropped lines and the kept lines. reverse=True drops from
            # the front (leading context), else from the back.
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # Dropping leading context shifts where the hunk starts.
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, other):
        if not isinstance(other, recordhunk):
            return False
        return (other.hunk == self.hunk
                and other.proc == self.proc
                and self.fromline == other.fromline
                and self.header.files() == other.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith('+'))
        rem = sum(1 for line in hunk if line.startswith('-'))
        return add, rem

    def reversehunk(self):
        """Return another recordhunk which is the reverse of this hunk.

        If this hunk is diff(A, B), the returned hunk is diff(B, A): swap
        fromline/toline and flip +/- signs, leaving everything else alone.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        """Emit this hunk in unified-diff form to fp."""
        delta = len(self.before) + len(self.after)
        # A trailing no-newline marker is not a real context line.
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
999
999
def getmessages():
    """Return the prompt and help strings used by interactive filtering.

    The table is keyed by prompt context ('multiple', 'single', 'help');
    each maps an operation name ('apply', 'discard', 'record') to a
    translated string.
    """
    multiple = {
        'apply': _("apply change %d/%d to '%s'?"),
        'discard': _("discard change %d/%d to '%s'?"),
        'record': _("record change %d/%d to '%s'?"),
    }
    single = {
        'apply': _("apply this change to '%s'?"),
        'discard': _("discard this change to '%s'?"),
        'record': _("record this change to '%s'?"),
    }
    helptexts = {
        'apply': _('[Ynesfdaq?]'
                   '$$ &Yes, apply this change'
                   '$$ &No, skip this change'
                   '$$ &Edit this change manually'
                   '$$ &Skip remaining changes to this file'
                   '$$ Apply remaining changes to this &file'
                   '$$ &Done, skip remaining changes and files'
                   '$$ Apply &all changes to all remaining files'
                   '$$ &Quit, applying no changes'
                   '$$ &? (display help)'),
        'discard': _('[Ynesfdaq?]'
                     '$$ &Yes, discard this change'
                     '$$ &No, skip this change'
                     '$$ &Edit this change manually'
                     '$$ &Skip remaining changes to this file'
                     '$$ Discard remaining changes to this &file'
                     '$$ &Done, skip remaining changes and files'
                     '$$ Discard &all changes to all remaining files'
                     '$$ &Quit, discarding no changes'
                     '$$ &? (display help)'),
        'record': _('[Ynesfdaq?]'
                    '$$ &Yes, record this change'
                    '$$ &No, skip this change'
                    '$$ &Edit this change manually'
                    '$$ &Skip remaining changes to this file'
                    '$$ Record remaining changes to this &file'
                    '$$ &Done, skip remaining changes and files'
                    '$$ Record &all changes to all remaining files'
                    '$$ &Quit, recording no changes'
                    '$$ &? (display help)'),
    }
    return {
        'multiple': multiple,
        'single': single,
        'help': helptexts,
    }
1045
1045
1046 def filterpatch(ui, headers, operation=None):
1046 def filterpatch(ui, headers, operation=None):
1047 """Interactively filter patch chunks into applied-only chunks"""
1047 """Interactively filter patch chunks into applied-only chunks"""
1048 messages = getmessages()
1048 messages = getmessages()
1049
1049
1050 if operation is None:
1050 if operation is None:
1051 operation = 'record'
1051 operation = 'record'
1052
1052
1053 def prompt(skipfile, skipall, query, chunk):
1053 def prompt(skipfile, skipall, query, chunk):
1054 """prompt query, and process base inputs
1054 """prompt query, and process base inputs
1055
1055
1056 - y/n for the rest of file
1056 - y/n for the rest of file
1057 - y/n for the rest
1057 - y/n for the rest
1058 - ? (help)
1058 - ? (help)
1059 - q (quit)
1059 - q (quit)
1060
1060
1061 Return True/False and possibly updated skipfile and skipall.
1061 Return True/False and possibly updated skipfile and skipall.
1062 """
1062 """
1063 newpatches = None
1063 newpatches = None
1064 if skipall is not None:
1064 if skipall is not None:
1065 return skipall, skipfile, skipall, newpatches
1065 return skipall, skipfile, skipall, newpatches
1066 if skipfile is not None:
1066 if skipfile is not None:
1067 return skipfile, skipfile, skipall, newpatches
1067 return skipfile, skipfile, skipall, newpatches
1068 while True:
1068 while True:
1069 resps = messages['help'][operation]
1069 resps = messages['help'][operation]
1070 r = ui.promptchoice("%s %s" % (query, resps))
1070 r = ui.promptchoice("%s %s" % (query, resps))
1071 ui.write("\n")
1071 ui.write("\n")
1072 if r == 8: # ?
1072 if r == 8: # ?
1073 for c, t in ui.extractchoices(resps)[1]:
1073 for c, t in ui.extractchoices(resps)[1]:
1074 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1074 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1075 continue
1075 continue
1076 elif r == 0: # yes
1076 elif r == 0: # yes
1077 ret = True
1077 ret = True
1078 elif r == 1: # no
1078 elif r == 1: # no
1079 ret = False
1079 ret = False
1080 elif r == 2: # Edit patch
1080 elif r == 2: # Edit patch
1081 if chunk is None:
1081 if chunk is None:
1082 ui.write(_('cannot edit patch for whole file'))
1082 ui.write(_('cannot edit patch for whole file'))
1083 ui.write("\n")
1083 ui.write("\n")
1084 continue
1084 continue
1085 if chunk.header.binary():
1085 if chunk.header.binary():
1086 ui.write(_('cannot edit patch for binary file'))
1086 ui.write(_('cannot edit patch for binary file'))
1087 ui.write("\n")
1087 ui.write("\n")
1088 continue
1088 continue
1089 # Patch comment based on the Git one (based on comment at end of
1089 # Patch comment based on the Git one (based on comment at end of
1090 # https://mercurial-scm.org/wiki/RecordExtension)
1090 # https://mercurial-scm.org/wiki/RecordExtension)
1091 phelp = '---' + _("""
1091 phelp = '---' + _("""
1092 To remove '-' lines, make them ' ' lines (context).
1092 To remove '-' lines, make them ' ' lines (context).
1093 To remove '+' lines, delete them.
1093 To remove '+' lines, delete them.
1094 Lines starting with # will be removed from the patch.
1094 Lines starting with # will be removed from the patch.
1095
1095
1096 If the patch applies cleanly, the edited hunk will immediately be
1096 If the patch applies cleanly, the edited hunk will immediately be
1097 added to the record list. If it does not apply cleanly, a rejects
1097 added to the record list. If it does not apply cleanly, a rejects
1098 file will be generated: you can use that when you try again. If
1098 file will be generated: you can use that when you try again. If
1099 all lines of the hunk are removed, then the edit is aborted and
1099 all lines of the hunk are removed, then the edit is aborted and
1100 the hunk is left unchanged.
1100 the hunk is left unchanged.
1101 """)
1101 """)
1102 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1102 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1103 suffix=".diff", text=True)
1103 suffix=".diff", text=True)
1104 ncpatchfp = None
1104 ncpatchfp = None
1105 try:
1105 try:
1106 # Write the initial patch
1106 # Write the initial patch
1107 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1107 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1108 chunk.header.write(f)
1108 chunk.header.write(f)
1109 chunk.write(f)
1109 chunk.write(f)
1110 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1110 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1111 f.close()
1111 f.close()
1112 # Start the editor and wait for it to complete
1112 # Start the editor and wait for it to complete
1113 editor = ui.geteditor()
1113 editor = ui.geteditor()
1114 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1114 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1115 environ={'HGUSER': ui.username()},
1115 environ={'HGUSER': ui.username()},
1116 blockedtag='filterpatch')
1116 blockedtag='filterpatch')
1117 if ret != 0:
1117 if ret != 0:
1118 ui.warn(_("editor exited with exit code %d\n") % ret)
1118 ui.warn(_("editor exited with exit code %d\n") % ret)
1119 continue
1119 continue
1120 # Remove comment lines
1120 # Remove comment lines
1121 patchfp = open(patchfn)
1121 patchfp = open(patchfn)
1122 ncpatchfp = stringio()
1122 ncpatchfp = stringio()
1123 for line in util.iterfile(patchfp):
1123 for line in util.iterfile(patchfp):
1124 if not line.startswith('#'):
1124 if not line.startswith('#'):
1125 ncpatchfp.write(line)
1125 ncpatchfp.write(line)
1126 patchfp.close()
1126 patchfp.close()
1127 ncpatchfp.seek(0)
1127 ncpatchfp.seek(0)
1128 newpatches = parsepatch(ncpatchfp)
1128 newpatches = parsepatch(ncpatchfp)
1129 finally:
1129 finally:
1130 os.unlink(patchfn)
1130 os.unlink(patchfn)
1131 del ncpatchfp
1131 del ncpatchfp
1132 # Signal that the chunk shouldn't be applied as-is, but
1132 # Signal that the chunk shouldn't be applied as-is, but
1133 # provide the new patch to be used instead.
1133 # provide the new patch to be used instead.
1134 ret = False
1134 ret = False
1135 elif r == 3: # Skip
1135 elif r == 3: # Skip
1136 ret = skipfile = False
1136 ret = skipfile = False
1137 elif r == 4: # file (Record remaining)
1137 elif r == 4: # file (Record remaining)
1138 ret = skipfile = True
1138 ret = skipfile = True
1139 elif r == 5: # done, skip remaining
1139 elif r == 5: # done, skip remaining
1140 ret = skipall = False
1140 ret = skipall = False
1141 elif r == 6: # all
1141 elif r == 6: # all
1142 ret = skipall = True
1142 ret = skipall = True
1143 elif r == 7: # quit
1143 elif r == 7: # quit
1144 raise error.Abort(_('user quit'))
1144 raise error.Abort(_('user quit'))
1145 return ret, skipfile, skipall, newpatches
1145 return ret, skipfile, skipall, newpatches
1146
1146
1147 seen = set()
1147 seen = set()
1148 applied = {} # 'filename' -> [] of chunks
1148 applied = {} # 'filename' -> [] of chunks
1149 skipfile, skipall = None, None
1149 skipfile, skipall = None, None
1150 pos, total = 1, sum(len(h.hunks) for h in headers)
1150 pos, total = 1, sum(len(h.hunks) for h in headers)
1151 for h in headers:
1151 for h in headers:
1152 pos += len(h.hunks)
1152 pos += len(h.hunks)
1153 skipfile = None
1153 skipfile = None
1154 fixoffset = 0
1154 fixoffset = 0
1155 hdr = ''.join(h.header)
1155 hdr = ''.join(h.header)
1156 if hdr in seen:
1156 if hdr in seen:
1157 continue
1157 continue
1158 seen.add(hdr)
1158 seen.add(hdr)
1159 if skipall is None:
1159 if skipall is None:
1160 h.pretty(ui)
1160 h.pretty(ui)
1161 msg = (_('examine changes to %s?') %
1161 msg = (_('examine changes to %s?') %
1162 _(' and ').join("'%s'" % f for f in h.files()))
1162 _(' and ').join("'%s'" % f for f in h.files()))
1163 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1163 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1164 if not r:
1164 if not r:
1165 continue
1165 continue
1166 applied[h.filename()] = [h]
1166 applied[h.filename()] = [h]
1167 if h.allhunks():
1167 if h.allhunks():
1168 applied[h.filename()] += h.hunks
1168 applied[h.filename()] += h.hunks
1169 continue
1169 continue
1170 for i, chunk in enumerate(h.hunks):
1170 for i, chunk in enumerate(h.hunks):
1171 if skipfile is None and skipall is None:
1171 if skipfile is None and skipall is None:
1172 chunk.pretty(ui)
1172 chunk.pretty(ui)
1173 if total == 1:
1173 if total == 1:
1174 msg = messages['single'][operation] % chunk.filename()
1174 msg = messages['single'][operation] % chunk.filename()
1175 else:
1175 else:
1176 idx = pos - len(h.hunks) + i
1176 idx = pos - len(h.hunks) + i
1177 msg = messages['multiple'][operation] % (idx, total,
1177 msg = messages['multiple'][operation] % (idx, total,
1178 chunk.filename())
1178 chunk.filename())
1179 r, skipfile, skipall, newpatches = prompt(skipfile,
1179 r, skipfile, skipall, newpatches = prompt(skipfile,
1180 skipall, msg, chunk)
1180 skipall, msg, chunk)
1181 if r:
1181 if r:
1182 if fixoffset:
1182 if fixoffset:
1183 chunk = copy.copy(chunk)
1183 chunk = copy.copy(chunk)
1184 chunk.toline += fixoffset
1184 chunk.toline += fixoffset
1185 applied[chunk.filename()].append(chunk)
1185 applied[chunk.filename()].append(chunk)
1186 elif newpatches is not None:
1186 elif newpatches is not None:
1187 for newpatch in newpatches:
1187 for newpatch in newpatches:
1188 for newhunk in newpatch.hunks:
1188 for newhunk in newpatch.hunks:
1189 if fixoffset:
1189 if fixoffset:
1190 newhunk.toline += fixoffset
1190 newhunk.toline += fixoffset
1191 applied[newhunk.filename()].append(newhunk)
1191 applied[newhunk.filename()].append(newhunk)
1192 else:
1192 else:
1193 fixoffset += chunk.removed - chunk.added
1193 fixoffset += chunk.removed - chunk.added
1194 return (sum([h for h in applied.itervalues()
1194 return (sum([h for h in applied.itervalues()
1195 if h[0].special() or len(h) > 1], []), {})
1195 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk of a text patch, in unified or context format.

    Parses the hunk body from a linereader and keeps both the raw hunk
    lines (self.hunk) and the decomposed old (self.a) / new (self.b)
    sides, together with the @@ range information.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # strip a CR that precedes the LF, leaving other bytes intact
            return [l[:-2] + '\n' if l.endswith('\r\n') else l
                    for l in lines]

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk ('@@ -a,la +b,lb @@') from lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the @@ line means a single-line range
        self.lena = 1 if self.lena is None else int(self.lena)
        self.lenb = 1 if self.lenb is None else int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk ('*** ... ---') from lr and
        convert it into unified form in self.hunk/self.a/self.b."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file' marker: drop the trailing LF
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the new-side line into self.hunk at the right spot,
            # skipping over '-' lines that belong to the old side
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline ...' marker, if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold exactly the advertised line counts."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to 'fuzz' context
        lines trimmed from the hunk ends (top only when toponly)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been fully decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the patched content; applies the delta when needed."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a git 'literal'/'delta' base85 binary hunk from lr."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of this row:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0]
            if 'A' <= l <= 'Z':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
def parsefilename(str):
    """Extract the filename from a '--- '/'+++ ' diff header line.

    The name ends at the first tab if there is one, otherwise at the
    first space; with neither separator the whole (EOL-stripped)
    remainder is the name.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    cut = s.find('\t')
    if cut < 0:
        cut = s.find(' ')
        if cut < 0:
            return s
    return s[:cut]
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''
    # headers have no reversehunk() and pass through unchanged
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the @@ range; lines follow via addhunk/addcontext
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # trailing context closes the pending hunk, if any
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen so far becomes the leading context of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # legal state transitions; anything else is a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    idx = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    remaining = strip
    while remaining > 0:
        idx = path.find('/', idx)
        if idx == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        idx += 1
        # consume '//' in the path
        while idx < pathlen - 1 and path[idx:idx + 1] == '/':
            idx += 1
        remaining -= 1
    return path[:idx].lstrip(), prefix + path[idx:].rstrip()
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    # Build a patchmeta record for a plain (non-git) hunk: decide which of
    # the patch's two paths names the file to operate on, and whether the
    # operation is a creation ('ADD') or a removal ('DELETE').
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation has no old content, a removal no new content
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    # neither side exists in the backend and it is not a creation
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to whichever side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1758
1758
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not part of this run: push back for the outer loop
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # gather header lines, then the ---/+++ pair if present
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # unchanged context lines (plus '\' no-newline markers)
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1804
1804
def scangitpatch(lr, firstline):
    """Prescan a git patch for copy/rename metadata.

    A git patch may rename 'a' to 'b' and then change 'b', or copy 'a'
    to 'c' and then change 'c'.  Such a sequence cannot be applied as
    it is read: by the time a change is reached, its source has already
    been renamed or modified.  The whole stream is therefore scanned up
    front for copy and rename commands, so the copies can be performed
    ahead of time, and the stream is rewound afterwards.
    """
    start = 0
    try:
        start = lr.fp.tell()
        src = lr.fp
    except IOError:
        # unseekable input: buffer the remainder in memory instead
        src = stringio(lr.fp.read())
    gitlr = linereader(src)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    src.seek(start)
    return gitpatches
1830
1830
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event must be emitted before the next hunk
    # newfile: a file header was just parsed on this iteration
    emitfile = newfile = False
    # metadata records from prescanning a git patch, kept reversed so
    # the next expected entry sits at the end of the list
    gitpatches = None

    # our states
    BFILE = 1
    # None until the diff flavor is known, then True for context diffs
    # and False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk for the current file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries that precede this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any trailing metadata-only git patches
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1926
1926
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def headerlen(chunk):
        # a delta opens with two varint-encoded sizes; return how many
        # bytes one of them occupies (high bit set means "continues")
        consumed = 0
        for c in chunk:
            consumed += 1
            if not (ord(c) & 0x80):
                break
        return consumed
    # skip the source size and target size headers
    binchunk = binchunk[headerlen(binchunk):]
    binchunk = binchunk[headerlen(binchunk):]
    pieces = []
    pos = 0
    end = len(binchunk)
    while pos < end:
        cmd = ord(binchunk[pos])
        pos += 1
        if cmd & 0x80:
            # copy instruction: little-endian offset/size bytes follow,
            # one per flag bit set in the command byte
            offset = 0
            size = 0
            for flag, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & flag:
                    offset |= ord(binchunk[pos]) << shift
                    pos += 1
            for flag, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & flag:
                    size |= ord(binchunk[pos]) << shift
                    pos += 1
            if size == 0:
                # an encoded size of zero means 64KiB
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif cmd != 0:
            # insert instruction: the next 'cmd' bytes are taken verbatim
            pieces.append(binchunk[pos:pos + cmd])
            pos += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1982
1982
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    Returns 0 when the patch applied cleanly, -1 when any rejects were
    found and 1 when the patch applied with fuzz.

    When 'eolmode' is 'strict' the patch content and the patched file
    are read in binary mode; otherwise, line endings are ignored while
    patching and then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, eolmode=eolmode,
                      strip=strip, prefix=prefix)
1995
1995
1996 def _canonprefix(repo, prefix):
1996 def _canonprefix(repo, prefix):
1997 if prefix:
1997 if prefix:
1998 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
1998 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
1999 if prefix != '':
1999 if prefix != '':
2000 prefix += '/'
2000 prefix += '/'
2001 return prefix
2001 return prefix
2002
2002
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Core application loop, driven by iterhunks() events.
    # Returns 0 on a clean apply, 1 if there was fuzz, -1 on rejects.
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip one component fewer: iterhunks reports git paths with
        # their leading a/ or b/ already removed
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: no hunks follow for this file
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the reject and keep going with the next file
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # remember copy/rename sources before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2087
2087
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # State while scanning the patcher's output: the file currently being
    # patched and whether its name was already echoed to the user.
    # Initialized here so that a 'with fuzz'/'FAILED' line emitted before
    # any 'patching file' line cannot raise a NameError.
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # make the dirstate reflect everything the patcher touched
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2129
2129
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    # Apply patchobj through the given backend, collecting touched paths
    # into 'files'.  Returns True when the patch applied with fuzz
    # (applydiff returned > 0), raises PatchError on rejects.
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # patchobj is not a path but an already-open file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2156
2156
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2163
2163
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> through a repobackend built from <ctx> and <store>.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2168
2168
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' selects how end of lines are handled:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externalcmd = ui.config('ui', 'patch')
    if externalcmd:
        return _externalpatch(ui, repo, externalcmd, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2190
2190
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    # Return the set of repository paths a patch file would touch,
    # without actually applying it.
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: adjust the recorded paths; strip one
                    # component fewer since the a//b/ prefix is gone
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    # a rename also touches its source
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2213
2213
class GitDiffRequired(Exception):
    """Raised when a change can only be represented as a git-style diff."""
2216
2216
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with every supported feature enabled and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, formatchanging=True,
                           whitespace=True, git=True)
2221
2221
# historical alias of diffallopts, kept so existing callers keep working
diffopts = diffallopts
2223
2223
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve one option: command line value wins over config, and
        # ui.plain() can force a fixed value for plain-mode output
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directory
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a value (possibly None) for 'index'
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    # strkwargs converts the keyword keys for Python 3, where ** expansion
    # requires native-str keys
    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2305
2305
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # cmdutil.getloglinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the per-hunk line lists into one text chunk per file
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header unless it is a single line with no hunk text
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2352
2352
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by path, to avoid re-opening
        # the same filelog when a path is diffed repeatedly
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # NOTE(review): iteritems() is Python 2 only — verify against the
        # codebase's py3 porting status
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # NOTE(review): deleting from 'copy' while iterating items() relies on
    # py2's list-returning items(); would break on py3
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change could not be expressed plainly: redo in git format
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2465
2465
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    inlinecolor = False
    # r'opts': kwargs keys are native strings on Python 3
    if kw.get(r'opts'):
        inlinecolor = kw[r'opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = {}
        if inlinecolor:
            matches = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        if i in matches:
                            # word-level coloring against the paired line
                            for t, l in _inlinediff(lines[i].rstrip(),
                                                    lines[matches[i]].rstrip(),
                                                    label):
                                yield (t, l)
                        else:
                            for token in tabsplitter.findall(stripline):
                                if '\t' == token[0]:
                                    yield (token, 'diff.tab')
                                else:
                                    yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # whatever rstrip() removed gets its own warning label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2531
2531
2532 def _findmatches(slist):
2532 def _findmatches(slist):
2533 '''Look for insertion matches to deletion and returns a dict of
2533 '''Look for insertion matches to deletion and returns a dict of
2534 correspondences.
2534 correspondences.
2535 '''
2535 '''
2536 lastmatch = 0
2536 lastmatch = 0
2537 matches = {}
2537 matches = {}
2538 for i, line in enumerate(slist):
2538 for i, line in enumerate(slist):
2539 if line == '':
2539 if line == '':
2540 continue
2540 continue
2541 if line[0] == '-':
2541 if line[0] == '-':
2542 lastmatch = max(lastmatch, i)
2542 lastmatch = max(lastmatch, i)
2543 newgroup = False
2543 newgroup = False
2544 for j, newline in enumerate(slist[lastmatch + 1:]):
2544 for j, newline in enumerate(slist[lastmatch + 1:]):
2545 if newline == '':
2545 if newline == '':
2546 continue
2546 continue
2547 if newline[0] == '-' and newgroup: # too far, no match
2547 if newline[0] == '-' and newgroup: # too far, no match
2548 break
2548 break
2549 if newline[0] == '+': # potential match
2549 if newline[0] == '+': # potential match
2550 newgroup = True
2550 newgroup = True
2551 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2551 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2552 if sim > 0.7:
2552 if sim > 0.7:
2553 lastmatch = lastmatch + 1 + j
2553 lastmatch = lastmatch + 1 + j
2554 matches[i] = lastmatch
2554 matches[i] = lastmatch
2555 matches[lastmatch] = i
2555 matches[lastmatch] = i
2556 break
2556 break
2557 return matches
2557 return matches
2558
2558
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.'''
    # skip the ndiff markers belonging to the other side of the change
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    buff = []
    # we never want to higlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        label = operation
        token = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        label = operation
        token = '+'
        s2 = s2[1:]
        s1 = s1[1:]
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)

    # word-split both lines and diff the word lists
    s = difflib.ndiff(re.split(br'(\W)', s2), re.split(br'(\W)', s1))
    for part in s:
        if part[0] in operation_skip or len(part) == 2:
            continue
        l = operation + '.highlight'
        if part[0] in ' ':
            l = operation
        if part[2:] == '\t':
            l = 'diff.tab'
        if l == label: # contiguous token with same label
            token += part[2:]
            continue
        else:
            # label changed: flush the accumulated token
            buff.append((token, label))
            label = l
            token = part[2:]
    buff.append((token, label))

    return buff
2600
2600
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel, which attaches color labels to diff()'s output
    return difflabel(diff, *args, **kw)
2604
2604
2605 def _filepairs(modified, added, removed, copy, opts):
2605 def _filepairs(modified, added, removed, copy, opts):
2606 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2606 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2607 before and f2 is the the name after. For added files, f1 will be None,
2607 before and f2 is the the name after. For added files, f1 will be None,
2608 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2608 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2609 or 'rename' (the latter two only if opts.git is set).'''
2609 or 'rename' (the latter two only if opts.git is set).'''
2610 gone = set()
2610 gone = set()
2611
2611
2612 copyto = dict([(v, k) for k, v in copy.items()])
2612 copyto = dict([(v, k) for k, v in copy.items()])
2613
2613
2614 addedset, removedset = set(added), set(removed)
2614 addedset, removedset = set(added), set(removed)
2615
2615
2616 for f in sorted(modified + added + removed):
2616 for f in sorted(modified + added + removed):
2617 copyop = None
2617 copyop = None
2618 f1, f2 = f, f
2618 f1, f2 = f, f
2619 if f in addedset:
2619 if f in addedset:
2620 f1 = None
2620 f1 = None
2621 if f in copy:
2621 if f in copy:
2622 if opts.git:
2622 if opts.git:
2623 f1 = copy[f]
2623 f1 = copy[f]
2624 if f1 in removedset and f1 not in gone:
2624 if f1 in removedset and f1 not in gone:
2625 copyop = 'rename'
2625 copyop = 'rename'
2626 gone.add(f1)
2626 gone.add(f1)
2627 else:
2627 else:
2628 copyop = 'copy'
2628 copyop = 'copy'
2629 elif f in removedset:
2629 elif f in removedset:
2630 f2 = None
2630 f2 = None
2631 if opts.git:
2631 if opts.git:
2632 # have we already reported a copy above?
2632 # have we already reported a copy above?
2633 if (f in copyto and copyto[f] in addedset
2633 if (f in copyto and copyto[f] in addedset
2634 and copy[copyto[f]] == f):
2634 and copy[copyto[f]] == f):
2635 continue
2635 continue
2636 yield f1, f2, copyop
2636 yield f1, f2, copyop
2637
2637
2638 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2638 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2639 copy, getfilectx, opts, losedatafn, prefix, relroot):
2639 copy, getfilectx, opts, losedatafn, prefix, relroot):
2640 '''given input data, generate a diff and yield it in blocks
2640 '''given input data, generate a diff and yield it in blocks
2641
2641
2642 If generating a diff would lose data like flags or binary data and
2642 If generating a diff would lose data like flags or binary data and
2643 losedatafn is not None, it will be called.
2643 losedatafn is not None, it will be called.
2644
2644
2645 relroot is removed and prefix is added to every path in the diff output.
2645 relroot is removed and prefix is added to every path in the diff output.
2646
2646
2647 If relroot is not empty, this function expects every path in modified,
2647 If relroot is not empty, this function expects every path in modified,
2648 added, removed and copy to start with it.'''
2648 added, removed and copy to start with it.'''
2649
2649
2650 def gitindex(text):
2650 def gitindex(text):
2651 if not text:
2651 if not text:
2652 text = ""
2652 text = ""
2653 l = len(text)
2653 l = len(text)
2654 s = hashlib.sha1('blob %d\0' % l)
2654 s = hashlib.sha1('blob %d\0' % l)
2655 s.update(text)
2655 s.update(text)
2656 return s.hexdigest()
2656 return s.hexdigest()
2657
2657
2658 if opts.noprefix:
2658 if opts.noprefix:
2659 aprefix = bprefix = ''
2659 aprefix = bprefix = ''
2660 else:
2660 else:
2661 aprefix = 'a/'
2661 aprefix = 'a/'
2662 bprefix = 'b/'
2662 bprefix = 'b/'
2663
2663
2664 def diffline(f, revs):
2664 def diffline(f, revs):
2665 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2665 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2666 return 'diff %s %s' % (revinfo, f)
2666 return 'diff %s %s' % (revinfo, f)
2667
2667
2668 def isempty(fctx):
2668 def isempty(fctx):
2669 return fctx is None or fctx.size() == 0
2669 return fctx is None or fctx.size() == 0
2670
2670
2671 date1 = util.datestr(ctx1.date())
2671 date1 = util.datestr(ctx1.date())
2672 date2 = util.datestr(ctx2.date())
2672 date2 = util.datestr(ctx2.date())
2673
2673
2674 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2674 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2675
2675
2676 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2676 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2677 or repo.ui.configbool('devel', 'check-relroot')):
2677 or repo.ui.configbool('devel', 'check-relroot')):
2678 for f in modified + added + removed + list(copy) + list(copy.values()):
2678 for f in modified + added + removed + list(copy) + list(copy.values()):
2679 if f is not None and not f.startswith(relroot):
2679 if f is not None and not f.startswith(relroot):
2680 raise AssertionError(
2680 raise AssertionError(
2681 "file %s doesn't start with relroot %s" % (f, relroot))
2681 "file %s doesn't start with relroot %s" % (f, relroot))
2682
2682
2683 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2683 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2684 content1 = None
2684 content1 = None
2685 content2 = None
2685 content2 = None
2686 fctx1 = None
2686 fctx1 = None
2687 fctx2 = None
2687 fctx2 = None
2688 flag1 = None
2688 flag1 = None
2689 flag2 = None
2689 flag2 = None
2690 if f1:
2690 if f1:
2691 fctx1 = getfilectx(f1, ctx1)
2691 fctx1 = getfilectx(f1, ctx1)
2692 if opts.git or losedatafn:
2692 if opts.git or losedatafn:
2693 flag1 = ctx1.flags(f1)
2693 flag1 = ctx1.flags(f1)
2694 if f2:
2694 if f2:
2695 fctx2 = getfilectx(f2, ctx2)
2695 fctx2 = getfilectx(f2, ctx2)
2696 if opts.git or losedatafn:
2696 if opts.git or losedatafn:
2697 flag2 = ctx2.flags(f2)
2697 flag2 = ctx2.flags(f2)
2698 # if binary is True, output "summary" or "base85", but not "text diff"
2698 # if binary is True, output "summary" or "base85", but not "text diff"
2699 binary = not opts.text and any(f.isbinary()
2699 binary = not opts.text and any(f.isbinary()
2700 for f in [fctx1, fctx2] if f is not None)
2700 for f in [fctx1, fctx2] if f is not None)
2701
2701
2702 if losedatafn and not opts.git:
2702 if losedatafn and not opts.git:
2703 if (binary or
2703 if (binary or
2704 # copy/rename
2704 # copy/rename
2705 f2 in copy or
2705 f2 in copy or
2706 # empty file creation
2706 # empty file creation
2707 (not f1 and isempty(fctx2)) or
2707 (not f1 and isempty(fctx2)) or
2708 # empty file deletion
2708 # empty file deletion
2709 (isempty(fctx1) and not f2) or
2709 (isempty(fctx1) and not f2) or
2710 # create with flags
2710 # create with flags
2711 (not f1 and flag2) or
2711 (not f1 and flag2) or
2712 # change flags
2712 # change flags
2713 (f1 and f2 and flag1 != flag2)):
2713 (f1 and f2 and flag1 != flag2)):
2714 losedatafn(f2 or f1)
2714 losedatafn(f2 or f1)
2715
2715
2716 path1 = f1 or f2
2716 path1 = f1 or f2
2717 path2 = f2 or f1
2717 path2 = f2 or f1
2718 path1 = posixpath.join(prefix, path1[len(relroot):])
2718 path1 = posixpath.join(prefix, path1[len(relroot):])
2719 path2 = posixpath.join(prefix, path2[len(relroot):])
2719 path2 = posixpath.join(prefix, path2[len(relroot):])
2720 header = []
2720 header = []
2721 if opts.git:
2721 if opts.git:
2722 header.append('diff --git %s%s %s%s' %
2722 header.append('diff --git %s%s %s%s' %
2723 (aprefix, path1, bprefix, path2))
2723 (aprefix, path1, bprefix, path2))
2724 if not f1: # added
2724 if not f1: # added
2725 header.append('new file mode %s' % gitmode[flag2])
2725 header.append('new file mode %s' % gitmode[flag2])
2726 elif not f2: # removed
2726 elif not f2: # removed
2727 header.append('deleted file mode %s' % gitmode[flag1])
2727 header.append('deleted file mode %s' % gitmode[flag1])
2728 else: # modified/copied/renamed
2728 else: # modified/copied/renamed
2729 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2729 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2730 if mode1 != mode2:
2730 if mode1 != mode2:
2731 header.append('old mode %s' % mode1)
2731 header.append('old mode %s' % mode1)
2732 header.append('new mode %s' % mode2)
2732 header.append('new mode %s' % mode2)
2733 if copyop is not None:
2733 if copyop is not None:
2734 if opts.showsimilarity:
2734 if opts.showsimilarity:
2735 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2735 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2736 header.append('similarity index %d%%' % sim)
2736 header.append('similarity index %d%%' % sim)
2737 header.append('%s from %s' % (copyop, path1))
2737 header.append('%s from %s' % (copyop, path1))
2738 header.append('%s to %s' % (copyop, path2))
2738 header.append('%s to %s' % (copyop, path2))
2739 elif revs and not repo.ui.quiet:
2739 elif revs and not repo.ui.quiet:
2740 header.append(diffline(path1, revs))
2740 header.append(diffline(path1, revs))
2741
2741
2742 # fctx.is | diffopts | what to | is fctx.data()
2742 # fctx.is | diffopts | what to | is fctx.data()
2743 # binary() | text nobinary git index | output? | outputted?
2743 # binary() | text nobinary git index | output? | outputted?
2744 # ------------------------------------|----------------------------
2744 # ------------------------------------|----------------------------
2745 # yes | no no no * | summary | no
2745 # yes | no no no * | summary | no
2746 # yes | no no yes * | base85 | yes
2746 # yes | no no yes * | base85 | yes
2747 # yes | no yes no * | summary | no
2747 # yes | no yes no * | summary | no
2748 # yes | no yes yes 0 | summary | no
2748 # yes | no yes yes 0 | summary | no
2749 # yes | no yes yes >0 | summary | semi [1]
2749 # yes | no yes yes >0 | summary | semi [1]
2750 # yes | yes * * * | text diff | yes
2750 # yes | yes * * * | text diff | yes
2751 # no | * * * * | text diff | yes
2751 # no | * * * * | text diff | yes
2752 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2752 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2753 if binary and (not opts.git or (opts.git and opts.nobinary and not
2753 if binary and (not opts.git or (opts.git and opts.nobinary and not
2754 opts.index)):
2754 opts.index)):
2755 # fast path: no binary content will be displayed, content1 and
2755 # fast path: no binary content will be displayed, content1 and
2756 # content2 are only used for equivalent test. cmp() could have a
2756 # content2 are only used for equivalent test. cmp() could have a
2757 # fast path.
2757 # fast path.
2758 if fctx1 is not None:
2758 if fctx1 is not None:
2759 content1 = b'\0'
2759 content1 = b'\0'
2760 if fctx2 is not None:
2760 if fctx2 is not None:
2761 if fctx1 is not None and not fctx1.cmp(fctx2):
2761 if fctx1 is not None and not fctx1.cmp(fctx2):
2762 content2 = b'\0' # not different
2762 content2 = b'\0' # not different
2763 else:
2763 else:
2764 content2 = b'\0\0'
2764 content2 = b'\0\0'
2765 else:
2765 else:
2766 # normal path: load contents
2766 # normal path: load contents
2767 if fctx1 is not None:
2767 if fctx1 is not None:
2768 content1 = fctx1.data()
2768 content1 = fctx1.data()
2769 if fctx2 is not None:
2769 if fctx2 is not None:
2770 content2 = fctx2.data()
2770 content2 = fctx2.data()
2771
2771
2772 if binary and opts.git and not opts.nobinary:
2772 if binary and opts.git and not opts.nobinary:
2773 text = mdiff.b85diff(content1, content2)
2773 text = mdiff.b85diff(content1, content2)
2774 if text:
2774 if text:
2775 header.append('index %s..%s' %
2775 header.append('index %s..%s' %
2776 (gitindex(content1), gitindex(content2)))
2776 (gitindex(content1), gitindex(content2)))
2777 hunks = (None, [text]),
2777 hunks = (None, [text]),
2778 else:
2778 else:
2779 if opts.git and opts.index > 0:
2779 if opts.git and opts.index > 0:
2780 flag = flag1
2780 flag = flag1
2781 if flag is None:
2781 if flag is None:
2782 flag = flag2
2782 flag = flag2
2783 header.append('index %s..%s %s' %
2783 header.append('index %s..%s %s' %
2784 (gitindex(content1)[0:opts.index],
2784 (gitindex(content1)[0:opts.index],
2785 gitindex(content2)[0:opts.index],
2785 gitindex(content2)[0:opts.index],
2786 gitmode[flag]))
2786 gitmode[flag]))
2787
2787
2788 uheaders, hunks = mdiff.unidiff(content1, date1,
2788 uheaders, hunks = mdiff.unidiff(content1, date1,
2789 content2, date2,
2789 content2, date2,
2790 path1, path2, opts=opts)
2790 path1, path2, opts=opts)
2791 header.extend(uheaders)
2791 header.extend(uheaders)
2792 yield fctx1, fctx2, header, hunks
2792 yield fctx1, fctx2, header, hunks
2793
2793
def diffstatsum(stats):
    '''Reduce an iterable of (filename, adds, removes, isbinary) tuples
    into the aggregates needed to render a diffstat: the widest filename
    (in display columns), the largest per-file change count, the total
    additions, the total removals, and whether any file was binary.
    '''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        changes = adds + removes
        if changes > maxtotal:
            maxtotal = changes
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2804
2804
def diffstatdata(lines):
    """Parse the lines of a diff into per-file statistics.

    Returns a list of 4-tuples (filename, adds, removes, isbinary),
    one entry per file touched by the diff, in order of appearance.
    """
    # Raw string: '\s' inside a non-raw literal is an invalid escape
    # sequence (DeprecationWarning, later SyntaxWarning, on Python 3).
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats collected for the current file, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++' (e.g. '--- a/file'),
    # which must not be counted as removals/additions.
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff:
            # reset counters and header state
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                # git-style header: "diff --git a/old b/new"
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker ends the header portion
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2843
2843
def diffstat(lines, width=80):
    '''Render a textual histogram ("diffstat") for the given diff lines.

    Each touched file gets one row showing its name, its change count
    (or "Bin" for binary files) and a +/- bar scaled to fit *width*
    columns, followed by a summary line when any files changed.
    '''
    filestats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(
        filestats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # the "Bin" marker needs at least three columns
        countwidth = max(countwidth, 3)
    # never let the bar area shrink below ten columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for filename, adds, removes, isbinary in filestats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if filestats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(filestats), totaladds, totalremoves))

    return ''.join(output)
2881
2881
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram row: emit the name, then the +/- runs with
            # their respective diffstat labels
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            # summary or empty line: no label
            yield (line, '')
        # splitlines() stripped the newline; restore it unlabeled
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now