##// END OF EJS Templates
py3: convert parsed message items to bytes in patch.extract()...
Yuya Nishihara -
r37474:19becdf5 default
parent child Browse files
Show More
@@ -1,2910 +1,2911 b''
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import copy
import difflib
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    policy,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# regexes shared by the parsing helpers below
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
_nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError

# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # does this line look like an RFC 2822 "Name: value" header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers outside header blocks
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = pycompat.emailparser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header block begins
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        # wrap a readline-only object into an iterator
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the start of the stream to pick a splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
188
188
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
195
195
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        msg = pycompat.emailparser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            # (msg.items() yields native strings on py3; convert to bytes)
            subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                                for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip the "[PATCH n/m]" prefix from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = pycompat.bytestr(part.get_content_type())
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff is commit message / metadata
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # git-style "---" separator: ignore trailing text
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing for the caller to unlink
        os.unlink(tmpname)
    return data
316
317
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # extract the symlink and exec bits from an st_mode-style value
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        # return an independent duplicate of this metadata object
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does 'afile' (the "a/" side of a diff header) match this file?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does 'bfile' (the "b/" side of a diff header) match this file?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
362
363
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # new file section begins; flush any pending metadata
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of hunks: this file's metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
406
407
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []  # pushed-back lines, returned before fp's lines

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            # drain pushed-back lines first, oldest first
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
426
427
class abstractbackend(object):
    # Interface for patch application targets (filesystem, repo, store).
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # default implementation is a no-op; subclasses may override

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
460
461
class fsbackend(abstractbackend):
    # Patch backend writing through a vfs rooted at 'basedir'.
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink content is its target; mode is (islink, isexec)
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file: caller interprets (None, None) as deleted
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only the flags need updating
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
509
510
class workingbackend(fsbackend):
    # fsbackend specialized for the working directory: records which
    # files were changed/removed/copied so close() can update dirstate.
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
553
554
class filestore(object):
    # Store patched file contents, in memory up to 'maxsize' bytes total,
    # spilling larger data to a temporary directory on disk.
    def __init__(self, maxsize=None):
        self.opener = None        # lazily-created vfs for spilled files
        self.files = {}           # fname -> (diskname, mode, copied)
        self.created = 0          # counter used to name spilled files
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0             # bytes currently held in memory
        self.data = {}            # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        # maxsize < 0 means "never spill to disk"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
590
591
class repobackend(abstractbackend):
    """Backend that stages patch results in a filestore against a changectx.

    Nothing is written to the working directory; changed and removed file
    names are accumulated so close() can report everything the patch
    touched.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        """Raise PatchError unless fname is tracked in the base context."""
        if fname in self.ctx:
            return
        raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None)."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = 'l' in flags
        isexec = 'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Record new contents for fname in the store.

        data=None means "reuse the current contents from the context".
        A copy source, when given, must itself be a tracked file.
        """
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed by the patch."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all file names touched by this backend."""
        return self.changed | self.removed
632
633
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# NOTE: raw strings here -- '\d' etc. in a plain literal is an invalid
# escape sequence (DeprecationWarning on py3.6+, a SyntaxError later).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']
637
638
class patchfile(object):
    """State and logic for applying hunks to a single target file.

    Tracks the target's current lines, detected EOL style, and running
    line offsets, and collects rejected hunks so close() can emit a
    .rej file for them.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a per-file patch description carrying path/mode/oldpath/op
        # attributes (presumably a patchmeta object defined elsewhere in
        # this module -- confirm against the caller).
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None              # detected EOL of the existing file
        self.backend = backend
        self.ui = ui
        self.lines = []              # current file content, split lines
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Read current data either from the backend (plain patch) or from
        # the store (copy/rename source).
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Work on LF internally; writelines() restores the
                    # requested EOL on output.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}       # line content -> [line numbers], for fuzzing
        self.dirty = 0
        self.offset = 0      # cumulative line-count delta from applied hunks
        self.skew = 0        # displacement found by the previous hunk
        self.rej = []        # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to fname via the backend, restoring the EOL style
        implied by self.eolmode ('auto' reuses the detected self.eol)."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file ..." once per file; as a warning when
        warn is set, otherwise only as a --verbose note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h to this file.

        Returns 0 on a clean apply, the fuzz amount when fuzzing was
        needed, and -1 when the hunk was rejected (appended to self.rej).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # Binary hunks replace (or remove) the whole file.
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz; at each level, first with only the top
        # context trimmed, then with both ends trimmed.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
854
855
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when an 'index ' line marks this as binary content."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a human-oriented rendering of the header lines to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the file must be taken whole (index/deleted file)."""
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        names = ' '.join(repr(f) for f in self.files())
        return '<header %s>' % names

    def isnewfile(self):
        for line in self.header:
            if self.newfile_re.match(line):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
926
927
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Limit context to maxcontext lines, dropping from the top
            # when reverse is set and from the bottom otherwise; report
            # how many lines were dropped so line numbers can shift.
            if maxcontext is None:
                return 0, lines
            surplus = len(lines) - maxcontext
            if surplus <= 0:
                return 0, lines
            if reverse:
                return surplus, lines[surplus:]
            return surplus, lines[:maxcontext]

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (self.hunk == v.hunk
                and self.proc == v.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line.startswith('+'))
        removed = sum(1 for line in hunk if line.startswith('-'))
        return added, removed

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        """Emit this hunk in unified-diff form to fp."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1005
1006
def getmessages():
    """Return the table of interactive-prompt strings used by filterpatch().

    'multiple' and 'single' map an operation name ('apply', 'discard',
    'record') to the per-change question; 'help' maps it to the
    ui.promptchoice() response string ('$$'-separated choices).  Each
    string stays a literal _() call so gettext extraction keeps working.
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1051
1052
1052 def filterpatch(ui, headers, operation=None):
1053 def filterpatch(ui, headers, operation=None):
1053 """Interactively filter patch chunks into applied-only chunks"""
1054 """Interactively filter patch chunks into applied-only chunks"""
1054 messages = getmessages()
1055 messages = getmessages()
1055
1056
1056 if operation is None:
1057 if operation is None:
1057 operation = 'record'
1058 operation = 'record'
1058
1059
1059 def prompt(skipfile, skipall, query, chunk):
1060 def prompt(skipfile, skipall, query, chunk):
1060 """prompt query, and process base inputs
1061 """prompt query, and process base inputs
1061
1062
1062 - y/n for the rest of file
1063 - y/n for the rest of file
1063 - y/n for the rest
1064 - y/n for the rest
1064 - ? (help)
1065 - ? (help)
1065 - q (quit)
1066 - q (quit)
1066
1067
1067 Return True/False and possibly updated skipfile and skipall.
1068 Return True/False and possibly updated skipfile and skipall.
1068 """
1069 """
1069 newpatches = None
1070 newpatches = None
1070 if skipall is not None:
1071 if skipall is not None:
1071 return skipall, skipfile, skipall, newpatches
1072 return skipall, skipfile, skipall, newpatches
1072 if skipfile is not None:
1073 if skipfile is not None:
1073 return skipfile, skipfile, skipall, newpatches
1074 return skipfile, skipfile, skipall, newpatches
1074 while True:
1075 while True:
1075 resps = messages['help'][operation]
1076 resps = messages['help'][operation]
1076 r = ui.promptchoice("%s %s" % (query, resps))
1077 r = ui.promptchoice("%s %s" % (query, resps))
1077 ui.write("\n")
1078 ui.write("\n")
1078 if r == 8: # ?
1079 if r == 8: # ?
1079 for c, t in ui.extractchoices(resps)[1]:
1080 for c, t in ui.extractchoices(resps)[1]:
1080 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1081 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1081 continue
1082 continue
1082 elif r == 0: # yes
1083 elif r == 0: # yes
1083 ret = True
1084 ret = True
1084 elif r == 1: # no
1085 elif r == 1: # no
1085 ret = False
1086 ret = False
1086 elif r == 2: # Edit patch
1087 elif r == 2: # Edit patch
1087 if chunk is None:
1088 if chunk is None:
1088 ui.write(_('cannot edit patch for whole file'))
1089 ui.write(_('cannot edit patch for whole file'))
1089 ui.write("\n")
1090 ui.write("\n")
1090 continue
1091 continue
1091 if chunk.header.binary():
1092 if chunk.header.binary():
1092 ui.write(_('cannot edit patch for binary file'))
1093 ui.write(_('cannot edit patch for binary file'))
1093 ui.write("\n")
1094 ui.write("\n")
1094 continue
1095 continue
1095 # Patch comment based on the Git one (based on comment at end of
1096 # Patch comment based on the Git one (based on comment at end of
1096 # https://mercurial-scm.org/wiki/RecordExtension)
1097 # https://mercurial-scm.org/wiki/RecordExtension)
1097 phelp = '---' + _("""
1098 phelp = '---' + _("""
1098 To remove '-' lines, make them ' ' lines (context).
1099 To remove '-' lines, make them ' ' lines (context).
1099 To remove '+' lines, delete them.
1100 To remove '+' lines, delete them.
1100 Lines starting with # will be removed from the patch.
1101 Lines starting with # will be removed from the patch.
1101
1102
1102 If the patch applies cleanly, the edited hunk will immediately be
1103 If the patch applies cleanly, the edited hunk will immediately be
1103 added to the record list. If it does not apply cleanly, a rejects
1104 added to the record list. If it does not apply cleanly, a rejects
1104 file will be generated: you can use that when you try again. If
1105 file will be generated: you can use that when you try again. If
1105 all lines of the hunk are removed, then the edit is aborted and
1106 all lines of the hunk are removed, then the edit is aborted and
1106 the hunk is left unchanged.
1107 the hunk is left unchanged.
1107 """)
1108 """)
1108 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1109 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1109 suffix=".diff")
1110 suffix=".diff")
1110 ncpatchfp = None
1111 ncpatchfp = None
1111 try:
1112 try:
1112 # Write the initial patch
1113 # Write the initial patch
1113 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1114 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1114 chunk.header.write(f)
1115 chunk.header.write(f)
1115 chunk.write(f)
1116 chunk.write(f)
1116 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1117 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1117 f.close()
1118 f.close()
1118 # Start the editor and wait for it to complete
1119 # Start the editor and wait for it to complete
1119 editor = ui.geteditor()
1120 editor = ui.geteditor()
1120 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1121 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1121 environ={'HGUSER': ui.username()},
1122 environ={'HGUSER': ui.username()},
1122 blockedtag='filterpatch')
1123 blockedtag='filterpatch')
1123 if ret != 0:
1124 if ret != 0:
1124 ui.warn(_("editor exited with exit code %d\n") % ret)
1125 ui.warn(_("editor exited with exit code %d\n") % ret)
1125 continue
1126 continue
1126 # Remove comment lines
1127 # Remove comment lines
1127 patchfp = open(patchfn, r'rb')
1128 patchfp = open(patchfn, r'rb')
1128 ncpatchfp = stringio()
1129 ncpatchfp = stringio()
1129 for line in util.iterfile(patchfp):
1130 for line in util.iterfile(patchfp):
1130 line = util.fromnativeeol(line)
1131 line = util.fromnativeeol(line)
1131 if not line.startswith('#'):
1132 if not line.startswith('#'):
1132 ncpatchfp.write(line)
1133 ncpatchfp.write(line)
1133 patchfp.close()
1134 patchfp.close()
1134 ncpatchfp.seek(0)
1135 ncpatchfp.seek(0)
1135 newpatches = parsepatch(ncpatchfp)
1136 newpatches = parsepatch(ncpatchfp)
1136 finally:
1137 finally:
1137 os.unlink(patchfn)
1138 os.unlink(patchfn)
1138 del ncpatchfp
1139 del ncpatchfp
1139 # Signal that the chunk shouldn't be applied as-is, but
1140 # Signal that the chunk shouldn't be applied as-is, but
1140 # provide the new patch to be used instead.
1141 # provide the new patch to be used instead.
1141 ret = False
1142 ret = False
1142 elif r == 3: # Skip
1143 elif r == 3: # Skip
1143 ret = skipfile = False
1144 ret = skipfile = False
1144 elif r == 4: # file (Record remaining)
1145 elif r == 4: # file (Record remaining)
1145 ret = skipfile = True
1146 ret = skipfile = True
1146 elif r == 5: # done, skip remaining
1147 elif r == 5: # done, skip remaining
1147 ret = skipall = False
1148 ret = skipall = False
1148 elif r == 6: # all
1149 elif r == 6: # all
1149 ret = skipall = True
1150 ret = skipall = True
1150 elif r == 7: # quit
1151 elif r == 7: # quit
1151 raise error.Abort(_('user quit'))
1152 raise error.Abort(_('user quit'))
1152 return ret, skipfile, skipall, newpatches
1153 return ret, skipfile, skipall, newpatches
1153
1154
1154 seen = set()
1155 seen = set()
1155 applied = {} # 'filename' -> [] of chunks
1156 applied = {} # 'filename' -> [] of chunks
1156 skipfile, skipall = None, None
1157 skipfile, skipall = None, None
1157 pos, total = 1, sum(len(h.hunks) for h in headers)
1158 pos, total = 1, sum(len(h.hunks) for h in headers)
1158 for h in headers:
1159 for h in headers:
1159 pos += len(h.hunks)
1160 pos += len(h.hunks)
1160 skipfile = None
1161 skipfile = None
1161 fixoffset = 0
1162 fixoffset = 0
1162 hdr = ''.join(h.header)
1163 hdr = ''.join(h.header)
1163 if hdr in seen:
1164 if hdr in seen:
1164 continue
1165 continue
1165 seen.add(hdr)
1166 seen.add(hdr)
1166 if skipall is None:
1167 if skipall is None:
1167 h.pretty(ui)
1168 h.pretty(ui)
1168 msg = (_('examine changes to %s?') %
1169 msg = (_('examine changes to %s?') %
1169 _(' and ').join("'%s'" % f for f in h.files()))
1170 _(' and ').join("'%s'" % f for f in h.files()))
1170 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1171 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1171 if not r:
1172 if not r:
1172 continue
1173 continue
1173 applied[h.filename()] = [h]
1174 applied[h.filename()] = [h]
1174 if h.allhunks():
1175 if h.allhunks():
1175 applied[h.filename()] += h.hunks
1176 applied[h.filename()] += h.hunks
1176 continue
1177 continue
1177 for i, chunk in enumerate(h.hunks):
1178 for i, chunk in enumerate(h.hunks):
1178 if skipfile is None and skipall is None:
1179 if skipfile is None and skipall is None:
1179 chunk.pretty(ui)
1180 chunk.pretty(ui)
1180 if total == 1:
1181 if total == 1:
1181 msg = messages['single'][operation] % chunk.filename()
1182 msg = messages['single'][operation] % chunk.filename()
1182 else:
1183 else:
1183 idx = pos - len(h.hunks) + i
1184 idx = pos - len(h.hunks) + i
1184 msg = messages['multiple'][operation] % (idx, total,
1185 msg = messages['multiple'][operation] % (idx, total,
1185 chunk.filename())
1186 chunk.filename())
1186 r, skipfile, skipall, newpatches = prompt(skipfile,
1187 r, skipfile, skipall, newpatches = prompt(skipfile,
1187 skipall, msg, chunk)
1188 skipall, msg, chunk)
1188 if r:
1189 if r:
1189 if fixoffset:
1190 if fixoffset:
1190 chunk = copy.copy(chunk)
1191 chunk = copy.copy(chunk)
1191 chunk.toline += fixoffset
1192 chunk.toline += fixoffset
1192 applied[chunk.filename()].append(chunk)
1193 applied[chunk.filename()].append(chunk)
1193 elif newpatches is not None:
1194 elif newpatches is not None:
1194 for newpatch in newpatches:
1195 for newpatch in newpatches:
1195 for newhunk in newpatch.hunks:
1196 for newhunk in newpatch.hunks:
1196 if fixoffset:
1197 if fixoffset:
1197 newhunk.toline += fixoffset
1198 newhunk.toline += fixoffset
1198 applied[newhunk.filename()].append(newhunk)
1199 applied[newhunk.filename()].append(newhunk)
1199 else:
1200 else:
1200 fixoffset += chunk.removed - chunk.added
1201 fixoffset += chunk.removed - chunk.added
1201 return (sum([h for h in applied.itervalues()
1202 return (sum([h for h in applied.itervalues()
1202 if h[0].special() or len(h) > 1], []), {})
1203 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk of a text patch, in unified or context format.

    Parses hunk lines out of a linereader ``lr`` and keeps three parallel
    views of the hunk:

    - ``self.hunk``: the raw hunk lines, starting with the ``@@``/range
      description line
    - ``self.a``: the old-side lines (prefixed ``-`` or `` ``)
    - ``self.b``: the new-side lines (unprefixed text)

    ``starta``/``lena`` and ``startb``/``lenb`` are the old/new ranges from
    the hunk header.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk description line; num: 1-based hunk number used in
        # error messages; lr: linereader positioned after desc, or None to
        # build an empty shell (see getnormalized); context: True to parse
        # context-diff format, False for unified format.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # Rewrite CRLF terminators to bare LF; other lines pass through.
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        # (lr=None skips parsing; every attribute is then copied over, with
        # only a/b normalized -- the raw self.hunk is shared, not copied)
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body following an ``@@`` header."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # A missing length in "@@ -l +l @@" means a single line.
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        # diffhelpers fills self.hunk/self.a/self.b from the linereader.
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and convert it to unified form.

        Reads the old-side block ("*** from,to ****"-style range on
        self.desc's following line) and then the new-side block, merging
        them into the unified representation used by self.hunk/a/b.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # Old-side lines: "- " removals, "! " changes, "  " context.
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": strip the trailing newline
            # from the last old-side line.
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk (index 0 is the description line) while the
        # new-side lines are interleaved into it below.
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says "old text" although this loop is
                # reading the new-side block; kept as-is.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # Merge this new-side line into self.hunk: skip past removals,
            # reuse a matching context line, otherwise insert an addition.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing "\ No newline at end of file" marker if present;
        # otherwise push the peeked line back onto the reader.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True once both sides contain exactly as many lines as the header
        # ranges promised.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                # Count trailing context lines from the bottom.
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Return (old, oldstart, new, newstart) with up to `fuzz` context
        # lines trimmed; starts are converted to 0-based offsets.
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1419
1420
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # lr: linereader positioned just after the "GIT binary patch" line;
        # fname: patched file name, used only in error messages.
        self.text = None       # decoded payload (bytes) once _read succeeds
        self.delta = False     # True for "delta" hunks, False for "literal"
        self.hunk = ['GIT binary patch\n']  # raw hunk lines as consumed
        self._fname = fname
        self._read(lr)

    def complete(self):
        # Parsing either sets self.text or raises, so non-None means done.
        return self.text is not None

    def new(self, lines):
        # Return the new file content; a delta hunk is applied against the
        # current content given in `lines`.
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a git binary hunk (base85 lines, zlib-compressed body)."""
        def getline(lr, hunk):
            # Read one raw line, record it in `hunk`, return it sans EOL.
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # Scan for the "literal <size>" or "delta <size>" header line.
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # First character encodes the decoded byte count of this line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52 (git binary patch format).
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # The declared size refers to the decompressed payload.
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1475
1476
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line.

    The four-character prefix is dropped, trailing EOL characters are
    stripped, and the name ends at the first tab or -- failing that --
    the first space. If neither separator exists, the whole remainder
    is the name.
    """
    rest = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = rest.find(sep)
        if cut >= 0:
            return rest[:cut]
    return rest
1485
1486
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''
    # Headers have no reversehunk() and pass through untouched; anything
    # that knows how to reverse itself is replaced by its reversed form.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1548
1549
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0     # current old-file line number
            self.toline = 0       # current new-file line number
            self.proc = ''        # function/context name from the @@ line
            self.header = None    # header object currently being filled
            self.context = []     # trailing context lines seen so far
            self.before = []      # context lines preceding the pending hunk
            self.hunk = []        # +/- lines of the pending hunk
            self.headers = []     # all completed header objects

        def addrange(self, limits):
            # Start of a new "@@ -from +to @@" range.
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # Context lines terminate any pending hunk: flush it into the
            # current header, advancing the line counters past it.
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                    self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # +/- lines begin a hunk; the context seen just before them
            # becomes the hunk's leading context.
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # A file header flushes the previous hunk and opens a new header.
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # Flush the final pending hunk and return all headers.
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; a missing entry means the event is
        # invalid in that state (handled below as a PatchError). Handlers
        # are stored unbound and invoked as transitions[state][event](p, data).
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    # Concatenate the chunks into one seekable buffer for scanpatch.
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1672
1673
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        nxt = path.find('/', pos)
        if nxt == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos = nxt + 1
        # a run of slashes counts as a single separator; skip the extras
        # (but never past the final character)
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1710
1711
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Synthesize a patchmeta record for a plain (non-git) patch.

    From the '---'/'+++' names (afile_orig/bfile_orig) and the first
    hunk, decide which path in the repository the patch targets and
    whether the change is a creation ('ADD') or a removal ('DELETE').
    Raises PatchError when neither a source nor a destination file can
    be determined.
    """
    # '/dev/null' marks a creation (as source) or a deletion (as target)
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        # avoid a second backend.exists() call for the common case
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch's own naming when the files are not
        # present in the backend
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1765
1766
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))

    Lines that match none of the above are yielded as ('other', line).
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        # '' is the sentinel linereader.readline() returns at EOF
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push it back for the next consumer
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # header ends at the next '---' or 'diff' line
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # ' ' prefix: unchanged context; '\' covers "\ No newline..."
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1811
1812
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    startpos = 0
    try:
        startpos = lr.fp.tell()
        stream = lr.fp
    except IOError:
        # unseekable input: slurp the rest into memory so we can rewind
        stream = stringio(lr.fp.read())
    scanner = linereader(stream)
    scanner.push(firstline)
    patches = readgitpatch(scanner)
    stream.seek(startpos)
    return patches
1837
1838
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.

    Handles unified diffs, context diffs and git extended diffs
    (including 'GIT binary patch' hunks).
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # reversed stack of pending git metadata records, or None until the
    # first 'diff --git' line is seen
    gitpatches = None

    # our states
    BFILE = 1
    # context is None (unknown), True (context diff) or False (unified)
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before it
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining git metadata records that had no hunks at all
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1933
1934
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the delta payload; data is the full source text the
    delta copies from.  Returns the reconstructed target text as bytes.

    Works on both Python 2 and 3: the original implementation called
    ord() on the result of iterating/indexing the chunk, which is a
    1-char str on py2 but already an int on py3 (TypeError), and
    accumulated bytes into a str.
    """
    def _byteat(chunk, i):
        # chunk[i] is an int on py3, a 1-char str on py2
        c = chunk[i]
        return c if isinstance(c, int) else ord(c)

    def deltahead(binchunk):
        # width of the variable-length size header at the start of the
        # chunk: it ends at the first byte whose high bit is clear
        i = 0
        while i < len(binchunk):
            c = _byteat(binchunk, i)
            i += 1
            if not (c & 0x80):
                return i
        return i
    out = bytearray()
    # skip the source-size and target-size headers
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = _byteat(binchunk, i)
        i += 1
        if (cmd & 0x80):
            # copy instruction: low flag bits select which offset/size
            # bytes follow (little-endian, omitted bytes are zero)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = _byteat(binchunk, i)
                i += 1
            if (cmd & 0x02):
                offset |= _byteat(binchunk, i) << 8
                i += 1
            if (cmd & 0x04):
                offset |= _byteat(binchunk, i) << 16
                i += 1
            if (cmd & 0x08):
                offset |= _byteat(binchunk, i) << 24
                i += 1
            if (cmd & 0x10):
                size = _byteat(binchunk, i)
                i += 1
            if (cmd & 0x20):
                size |= _byteat(binchunk, i) << 8
                i += 1
            if (cmd & 0x40):
                size |= _byteat(binchunk, i) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert instruction: the next cmd bytes are literal data
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return bytes(out)
1989
1990
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # 'patchfile' (defined elsewhere in this module) is the per-file
    # patcher that _applydiff instantiates for each target file
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
2002
2003
def _canonprefix(repo, prefix):
    """Canonicalize a prefix relative to the repo root.

    A non-empty prefix is resolved against the repository root and the
    current working directory, and given a trailing '/'.  An empty (or
    otherwise false) prefix is returned unchanged.
    """
    if not prefix:
        return prefix
    canonical = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if canonical != '':
        canonical += '/'
    return canonical
2009
2010
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Core patch application loop shared by applydiff().

    Consumes the event stream from iterhunks(fp), instantiating
    'patcher' for each target file and feeding it the hunks.  Returns
    0 on clean application, 1 if any hunk applied with fuzz, -1 if any
    file had rejects.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip leading path components and prepend the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file failed to open; its reject was counted below
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # git metadata: normalize recorded paths
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (copy/rename/mode/delete/empty add)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any
            # hunk can modify them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2094
2095
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Paths reported by the external tool's 'patching file' lines are
    added to 'files'.  Raises PatchError if the command exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    fp = procutil.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                             procutil.shellquote(patchname)))
    try:
        # scrape the external tool's output for fuzz/reject diagnostics
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): 'pf'/'printed_file' are only bound by a
                # preceding 'patching file' line — assumes the tool
                # always prints one first; verify against patch(1)
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         procutil.explainexit(code)[0])
    return fuzz
2136
2137
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj against the given backend.

    patchobj may be a file name or an open file-like object (the
    TypeError fallback below handles the latter).  Paths touched by
    the patch are added to 'files' when provided.  Returns True if the
    patch applied with fuzz, False if it applied cleanly; raises
    PatchError if any hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # EAFP: treat patchobj as a path first, else as a file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2163
2164
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # 'similarity' is forwarded to workingbackend — presumably for
    # rename/copy similarity detection; confirm in workingbackend
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2170
2171
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of changectx <ctx>, writing results to <store>.

    Same contract as patchbackend(): returns True when the patch
    applied with fuzz, False on a clean apply; raises PatchError on
    rejects.
    """
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2175
2176
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # dispatch to an external patch program if the user configured one,
    # otherwise use the builtin implementation
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2197
2198
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repository paths touched by the patch at patchpath.

    The patch is only scanned, not applied.  For renames both the old
    and the new path are included.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git metadata: normalize the recorded paths
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2220
2221
class GitDiffRequired(Exception):
    """Raised when a change cannot be expressed without git-style diffs."""
2223
2224
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with every optional feature group enabled and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for callers that still use the old name
diffopts = diffallopts
2230
2231
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Resolution order: explicit command-line value, then the
        # HGPLAIN override (forceplain), then the config file.
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    # experimental knobs are read straight from config, bypassing get()
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
    buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # not an integer: map the recognized keywords to lengths
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the result, even when it stayed None
                # (unrecognized or 'none' values)
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2313
2314
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten all (surviving) hunk lines into a single text blob
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header unless it is a lone line with no hunk text
        # (multi-line headers carry extended/git info worth showing anyway)
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2360
2361
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (bounded at 20 entries) of filelogs keyed by
        # file name, so repeated filectx lookups reuse the parsed filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # (use items() rather than the py2-only iteritems(), consistent with
        # the copy.items() call below)
        copy = dict(((dst, src) for (dst, src) in copy.items()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2473
2474
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    func is a diff-chunk generator such as diff(); each chunk is split
    into lines and every line (or sub-token of a line) is paired with a
    color label like 'diff.inserted' or 'diff.hunk'.
    '''
    # inline (word-level) coloring is only attempted when the caller's
    # diffopts enabled worddiff
    inlinecolor = False
    if kw.get(r'opts'):
        inlinecolor = kw[r'opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # 'head' tracks whether we are currently inside a file header block
    # (between a 'diff ...' line and the first '@' hunk line)
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = {}
        if inlinecolor:
            # map deleted-line indexes to their matching inserted lines
            matches = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                # any line not starting with a diff body character opens
                # a new header block
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        if i in matches:
                            # word-level highlighting against the paired line
                            for t, l in _inlinediff(lines[i].rstrip(),
                                                    lines[matches[i]].rstrip(),
                                                    label):
                                yield (t, l)
                        else:
                            # split out tabs so they can get their own label
                            for token in tabsplitter.findall(stripline):
                                if token.startswith('\t'):
                                    yield (token, 'diff.tab')
                                else:
                                    yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no known prefix: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2539
2540
2540 def _findmatches(slist):
2541 def _findmatches(slist):
2541 '''Look for insertion matches to deletion and returns a dict of
2542 '''Look for insertion matches to deletion and returns a dict of
2542 correspondences.
2543 correspondences.
2543 '''
2544 '''
2544 lastmatch = 0
2545 lastmatch = 0
2545 matches = {}
2546 matches = {}
2546 for i, line in enumerate(slist):
2547 for i, line in enumerate(slist):
2547 if line == '':
2548 if line == '':
2548 continue
2549 continue
2549 if line[0] == '-':
2550 if line[0] == '-':
2550 lastmatch = max(lastmatch, i)
2551 lastmatch = max(lastmatch, i)
2551 newgroup = False
2552 newgroup = False
2552 for j, newline in enumerate(slist[lastmatch + 1:]):
2553 for j, newline in enumerate(slist[lastmatch + 1:]):
2553 if newline == '':
2554 if newline == '':
2554 continue
2555 continue
2555 if newline[0] == '-' and newgroup: # too far, no match
2556 if newline[0] == '-' and newgroup: # too far, no match
2556 break
2557 break
2557 if newline[0] == '+': # potential match
2558 if newline[0] == '+': # potential match
2558 newgroup = True
2559 newgroup = True
2559 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2560 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2560 if sim > 0.7:
2561 if sim > 0.7:
2561 lastmatch = lastmatch + 1 + j
2562 lastmatch = lastmatch + 1 + j
2562 matches[i] = lastmatch
2563 matches[i] = lastmatch
2563 matches[lastmatch] = i
2564 matches[lastmatch] = i
2564 break
2565 break
2565 return matches
2566 return matches
2566
2567
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.

    s1 and s2 are a matched pair of changed lines and operation is
    either 'diff.deleted' or 'diff.inserted'.  Returns a list of
    (token, label) tuples where the words that actually changed carry
    the '<operation>.highlight' label.
    '''
    # which ndiff markers to ignore: for a deleted line we skip the
    # insertions (and vice versa), plus the '?' guide lines
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    buff = []
    # we never want to highlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        label = operation
        token = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        label = operation
        token = '+'
        s2 = s2[1:]
        s1 = s1[1:]
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)

    # _nonwordre is a module-level regex (defined elsewhere in this file);
    # presumably it splits the line into word and non-word tokens — confirm
    s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
    for part in s:
        # ndiff yields '<marker><space><token>'; skip the other side's
        # changes and the bare two-character (empty token) entries
        if part[0] in operation_skip or len(part) == 2:
            continue
        l = operation + '.highlight'
        if part[0] in ' ':
            l = operation
        if part[2:] == '\t':
            l = 'diff.tab'
        if l == label:  # contiguous token with same label
            token += part[2:]
            continue
        else:
            # label changed: flush the accumulated run and start a new one
            buff.append((token, label))
            label = l
            token = part[2:]
    # flush the final run
    buff.append((token, label))

    return buff
2608
2609
def diffui(*args, **kw):
    """Wrap diff() so each chunk is yielded as an (output, label) pair
    suitable for ui.write()."""
    return difflabel(diff, *args, **kw)
2612
2613
2613 def _filepairs(modified, added, removed, copy, opts):
2614 def _filepairs(modified, added, removed, copy, opts):
2614 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2615 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2615 before and f2 is the the name after. For added files, f1 will be None,
2616 before and f2 is the the name after. For added files, f1 will be None,
2616 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2617 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2617 or 'rename' (the latter two only if opts.git is set).'''
2618 or 'rename' (the latter two only if opts.git is set).'''
2618 gone = set()
2619 gone = set()
2619
2620
2620 copyto = dict([(v, k) for k, v in copy.items()])
2621 copyto = dict([(v, k) for k, v in copy.items()])
2621
2622
2622 addedset, removedset = set(added), set(removed)
2623 addedset, removedset = set(added), set(removed)
2623
2624
2624 for f in sorted(modified + added + removed):
2625 for f in sorted(modified + added + removed):
2625 copyop = None
2626 copyop = None
2626 f1, f2 = f, f
2627 f1, f2 = f, f
2627 if f in addedset:
2628 if f in addedset:
2628 f1 = None
2629 f1 = None
2629 if f in copy:
2630 if f in copy:
2630 if opts.git:
2631 if opts.git:
2631 f1 = copy[f]
2632 f1 = copy[f]
2632 if f1 in removedset and f1 not in gone:
2633 if f1 in removedset and f1 not in gone:
2633 copyop = 'rename'
2634 copyop = 'rename'
2634 gone.add(f1)
2635 gone.add(f1)
2635 else:
2636 else:
2636 copyop = 'copy'
2637 copyop = 'copy'
2637 elif f in removedset:
2638 elif f in removedset:
2638 f2 = None
2639 f2 = None
2639 if opts.git:
2640 if opts.git:
2640 # have we already reported a copy above?
2641 # have we already reported a copy above?
2641 if (f in copyto and copyto[f] in addedset
2642 if (f in copyto and copyto[f] in addedset
2642 and copy[copyto[f]] == f):
2643 and copy[copyto[f]] == f):
2643 continue
2644 continue
2644 yield f1, f2, copyop
2645 yield f1, f2, copyop
2645
2646
2646 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2647 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2647 copy, getfilectx, opts, losedatafn, prefix, relroot):
2648 copy, getfilectx, opts, losedatafn, prefix, relroot):
2648 '''given input data, generate a diff and yield it in blocks
2649 '''given input data, generate a diff and yield it in blocks
2649
2650
2650 If generating a diff would lose data like flags or binary data and
2651 If generating a diff would lose data like flags or binary data and
2651 losedatafn is not None, it will be called.
2652 losedatafn is not None, it will be called.
2652
2653
2653 relroot is removed and prefix is added to every path in the diff output.
2654 relroot is removed and prefix is added to every path in the diff output.
2654
2655
2655 If relroot is not empty, this function expects every path in modified,
2656 If relroot is not empty, this function expects every path in modified,
2656 added, removed and copy to start with it.'''
2657 added, removed and copy to start with it.'''
2657
2658
2658 def gitindex(text):
2659 def gitindex(text):
2659 if not text:
2660 if not text:
2660 text = ""
2661 text = ""
2661 l = len(text)
2662 l = len(text)
2662 s = hashlib.sha1('blob %d\0' % l)
2663 s = hashlib.sha1('blob %d\0' % l)
2663 s.update(text)
2664 s.update(text)
2664 return hex(s.digest())
2665 return hex(s.digest())
2665
2666
2666 if opts.noprefix:
2667 if opts.noprefix:
2667 aprefix = bprefix = ''
2668 aprefix = bprefix = ''
2668 else:
2669 else:
2669 aprefix = 'a/'
2670 aprefix = 'a/'
2670 bprefix = 'b/'
2671 bprefix = 'b/'
2671
2672
2672 def diffline(f, revs):
2673 def diffline(f, revs):
2673 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2674 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2674 return 'diff %s %s' % (revinfo, f)
2675 return 'diff %s %s' % (revinfo, f)
2675
2676
2676 def isempty(fctx):
2677 def isempty(fctx):
2677 return fctx is None or fctx.size() == 0
2678 return fctx is None or fctx.size() == 0
2678
2679
2679 date1 = dateutil.datestr(ctx1.date())
2680 date1 = dateutil.datestr(ctx1.date())
2680 date2 = dateutil.datestr(ctx2.date())
2681 date2 = dateutil.datestr(ctx2.date())
2681
2682
2682 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2683 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2683
2684
2684 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2685 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2685 or repo.ui.configbool('devel', 'check-relroot')):
2686 or repo.ui.configbool('devel', 'check-relroot')):
2686 for f in modified + added + removed + list(copy) + list(copy.values()):
2687 for f in modified + added + removed + list(copy) + list(copy.values()):
2687 if f is not None and not f.startswith(relroot):
2688 if f is not None and not f.startswith(relroot):
2688 raise AssertionError(
2689 raise AssertionError(
2689 "file %s doesn't start with relroot %s" % (f, relroot))
2690 "file %s doesn't start with relroot %s" % (f, relroot))
2690
2691
2691 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2692 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2692 content1 = None
2693 content1 = None
2693 content2 = None
2694 content2 = None
2694 fctx1 = None
2695 fctx1 = None
2695 fctx2 = None
2696 fctx2 = None
2696 flag1 = None
2697 flag1 = None
2697 flag2 = None
2698 flag2 = None
2698 if f1:
2699 if f1:
2699 fctx1 = getfilectx(f1, ctx1)
2700 fctx1 = getfilectx(f1, ctx1)
2700 if opts.git or losedatafn:
2701 if opts.git or losedatafn:
2701 flag1 = ctx1.flags(f1)
2702 flag1 = ctx1.flags(f1)
2702 if f2:
2703 if f2:
2703 fctx2 = getfilectx(f2, ctx2)
2704 fctx2 = getfilectx(f2, ctx2)
2704 if opts.git or losedatafn:
2705 if opts.git or losedatafn:
2705 flag2 = ctx2.flags(f2)
2706 flag2 = ctx2.flags(f2)
2706 # if binary is True, output "summary" or "base85", but not "text diff"
2707 # if binary is True, output "summary" or "base85", but not "text diff"
2707 if opts.text:
2708 if opts.text:
2708 binary = False
2709 binary = False
2709 else:
2710 else:
2710 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2711 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2711
2712
2712 if losedatafn and not opts.git:
2713 if losedatafn and not opts.git:
2713 if (binary or
2714 if (binary or
2714 # copy/rename
2715 # copy/rename
2715 f2 in copy or
2716 f2 in copy or
2716 # empty file creation
2717 # empty file creation
2717 (not f1 and isempty(fctx2)) or
2718 (not f1 and isempty(fctx2)) or
2718 # empty file deletion
2719 # empty file deletion
2719 (isempty(fctx1) and not f2) or
2720 (isempty(fctx1) and not f2) or
2720 # create with flags
2721 # create with flags
2721 (not f1 and flag2) or
2722 (not f1 and flag2) or
2722 # change flags
2723 # change flags
2723 (f1 and f2 and flag1 != flag2)):
2724 (f1 and f2 and flag1 != flag2)):
2724 losedatafn(f2 or f1)
2725 losedatafn(f2 or f1)
2725
2726
2726 path1 = f1 or f2
2727 path1 = f1 or f2
2727 path2 = f2 or f1
2728 path2 = f2 or f1
2728 path1 = posixpath.join(prefix, path1[len(relroot):])
2729 path1 = posixpath.join(prefix, path1[len(relroot):])
2729 path2 = posixpath.join(prefix, path2[len(relroot):])
2730 path2 = posixpath.join(prefix, path2[len(relroot):])
2730 header = []
2731 header = []
2731 if opts.git:
2732 if opts.git:
2732 header.append('diff --git %s%s %s%s' %
2733 header.append('diff --git %s%s %s%s' %
2733 (aprefix, path1, bprefix, path2))
2734 (aprefix, path1, bprefix, path2))
2734 if not f1: # added
2735 if not f1: # added
2735 header.append('new file mode %s' % gitmode[flag2])
2736 header.append('new file mode %s' % gitmode[flag2])
2736 elif not f2: # removed
2737 elif not f2: # removed
2737 header.append('deleted file mode %s' % gitmode[flag1])
2738 header.append('deleted file mode %s' % gitmode[flag1])
2738 else: # modified/copied/renamed
2739 else: # modified/copied/renamed
2739 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2740 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2740 if mode1 != mode2:
2741 if mode1 != mode2:
2741 header.append('old mode %s' % mode1)
2742 header.append('old mode %s' % mode1)
2742 header.append('new mode %s' % mode2)
2743 header.append('new mode %s' % mode2)
2743 if copyop is not None:
2744 if copyop is not None:
2744 if opts.showsimilarity:
2745 if opts.showsimilarity:
2745 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2746 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2746 header.append('similarity index %d%%' % sim)
2747 header.append('similarity index %d%%' % sim)
2747 header.append('%s from %s' % (copyop, path1))
2748 header.append('%s from %s' % (copyop, path1))
2748 header.append('%s to %s' % (copyop, path2))
2749 header.append('%s to %s' % (copyop, path2))
2749 elif revs and not repo.ui.quiet:
2750 elif revs and not repo.ui.quiet:
2750 header.append(diffline(path1, revs))
2751 header.append(diffline(path1, revs))
2751
2752
2752 # fctx.is | diffopts | what to | is fctx.data()
2753 # fctx.is | diffopts | what to | is fctx.data()
2753 # binary() | text nobinary git index | output? | outputted?
2754 # binary() | text nobinary git index | output? | outputted?
2754 # ------------------------------------|----------------------------
2755 # ------------------------------------|----------------------------
2755 # yes | no no no * | summary | no
2756 # yes | no no no * | summary | no
2756 # yes | no no yes * | base85 | yes
2757 # yes | no no yes * | base85 | yes
2757 # yes | no yes no * | summary | no
2758 # yes | no yes no * | summary | no
2758 # yes | no yes yes 0 | summary | no
2759 # yes | no yes yes 0 | summary | no
2759 # yes | no yes yes >0 | summary | semi [1]
2760 # yes | no yes yes >0 | summary | semi [1]
2760 # yes | yes * * * | text diff | yes
2761 # yes | yes * * * | text diff | yes
2761 # no | * * * * | text diff | yes
2762 # no | * * * * | text diff | yes
2762 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2763 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2763 if binary and (not opts.git or (opts.git and opts.nobinary and not
2764 if binary and (not opts.git or (opts.git and opts.nobinary and not
2764 opts.index)):
2765 opts.index)):
2765 # fast path: no binary content will be displayed, content1 and
2766 # fast path: no binary content will be displayed, content1 and
2766 # content2 are only used for equivalent test. cmp() could have a
2767 # content2 are only used for equivalent test. cmp() could have a
2767 # fast path.
2768 # fast path.
2768 if fctx1 is not None:
2769 if fctx1 is not None:
2769 content1 = b'\0'
2770 content1 = b'\0'
2770 if fctx2 is not None:
2771 if fctx2 is not None:
2771 if fctx1 is not None and not fctx1.cmp(fctx2):
2772 if fctx1 is not None and not fctx1.cmp(fctx2):
2772 content2 = b'\0' # not different
2773 content2 = b'\0' # not different
2773 else:
2774 else:
2774 content2 = b'\0\0'
2775 content2 = b'\0\0'
2775 else:
2776 else:
2776 # normal path: load contents
2777 # normal path: load contents
2777 if fctx1 is not None:
2778 if fctx1 is not None:
2778 content1 = fctx1.data()
2779 content1 = fctx1.data()
2779 if fctx2 is not None:
2780 if fctx2 is not None:
2780 content2 = fctx2.data()
2781 content2 = fctx2.data()
2781
2782
2782 if binary and opts.git and not opts.nobinary:
2783 if binary and opts.git and not opts.nobinary:
2783 text = mdiff.b85diff(content1, content2)
2784 text = mdiff.b85diff(content1, content2)
2784 if text:
2785 if text:
2785 header.append('index %s..%s' %
2786 header.append('index %s..%s' %
2786 (gitindex(content1), gitindex(content2)))
2787 (gitindex(content1), gitindex(content2)))
2787 hunks = (None, [text]),
2788 hunks = (None, [text]),
2788 else:
2789 else:
2789 if opts.git and opts.index > 0:
2790 if opts.git and opts.index > 0:
2790 flag = flag1
2791 flag = flag1
2791 if flag is None:
2792 if flag is None:
2792 flag = flag2
2793 flag = flag2
2793 header.append('index %s..%s %s' %
2794 header.append('index %s..%s %s' %
2794 (gitindex(content1)[0:opts.index],
2795 (gitindex(content1)[0:opts.index],
2795 gitindex(content2)[0:opts.index],
2796 gitindex(content2)[0:opts.index],
2796 gitmode[flag]))
2797 gitmode[flag]))
2797
2798
2798 uheaders, hunks = mdiff.unidiff(content1, date1,
2799 uheaders, hunks = mdiff.unidiff(content1, date1,
2799 content2, date2,
2800 content2, date2,
2800 path1, path2,
2801 path1, path2,
2801 binary=binary, opts=opts)
2802 binary=binary, opts=opts)
2802 header.extend(uheaders)
2803 header.extend(uheaders)
2803 yield fctx1, fctx2, header, hunks
2804 yield fctx1, fctx2, header, hunks
2804
2805
def diffstatsum(stats):
    '''Aggregate per-file diffstat tuples into overall totals.

    stats is an iterable of (filename, added, removed, isbinary)
    tuples as produced by diffstatdata(). Returns a 5-tuple
    (maxfile, maxtotal, addtotal, removetotal, binary) where maxfile
    is the widest filename in display columns, maxtotal the largest
    added+removed count of any single file, addtotal/removetotal the
    grand totals, and binary is True if any entry was binary.
    '''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, added, removed, isbinary in stats:
        # colwidth measures display columns, not byte length
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, added + removed)
        addtotal += added
        removetotal += removed
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2815
2816
def diffstatdata(lines):
    '''Parse diff output into per-file statistics.

    lines is an iterable of diff lines. Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file diff
    encountered, in input order.
    '''
    # raw string: '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning/SyntaxWarning on modern Python 3);
    # the compiled pattern is unchanged
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned (no-op
        # before the first file header has been seen)
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk header ends the per-file header section
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the trailing file, if any
    addresult()
    return results
2854
2855
def diffstat(lines, width=80):
    '''Render a textual diffstat summary of diff lines.

    Returns a single string: one row per file (name, change count, a
    +/- histogram scaled to fit within width columns) followed by a
    totals line when any files changed.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # reserve room for the count column; 'Bin' needs at least 3 chars
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(changes):
        if maxtotal <= graphwidth:
            return changes
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(changes * graphwidth // maxtotal, int(bool(changes)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2892
2893
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # plain line (e.g. the totals footer): no label
            yield (line, '')
        else:
            # split off the trailing +/- histogram and label its parts
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now