##// END OF EJS Templates
patch: make parsepatch optionally trim context lines...
Jun Wu -
r33270:f7b63571 default
parent child Browse files
Show More
@@ -1,2746 +1,2793 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
# diff helper routines; resolved through the policy module, which selects
# between the C extension and a pure-Python fallback.
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# matches the header line of a git-style diff: "diff --git a/<src> b/<dst>"
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits text into alternating runs of tabs and non-tabs
tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
51
51
52
52
53 # public functions
53 # public functions
54
54
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic check for an RFC-2822-style "Key: value" header line
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines in a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators; each message is recursively
        # re-split (it may itself contain MIME parts or plain patches)
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # parse the whole input as a MIME message and yield each
        # patch-bearing part as its own chunk
        def msgfp(m):
            # flatten a message object back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on the resumption of header-looking lines after a diff
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object to the (Python 2) iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the leading lines to pick the right splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
181
181
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Each "# <header> <value>" line found in an hg patch header is stored
# in the extract() result dictionary under the corresponding key.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
188
188
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # patch body is spooled to a temp file; its name is returned to the
    # caller in data['filename'] (caller unlinks it)
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading "[PATCH ...]" tag and unfold wrapped lines
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # this part contains a patch; text before the diff start
                # is the commit message (possibly with hg export headers)
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible headers, see patchheadermap
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # git/patchbomb separator: everything after it is
                        # not part of the commit message
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file on any failure, then propagate
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing for the caller to unlink
        os.unlink(tmpname)
    return data
308
308
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode an st_mode-style octal integer into (islink, isexec)
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # shallow duplicate carrying over all metadata fields
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            # an added file has no "a" side
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            # a deleted file has no "b" side
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # does a hunk with the given a/b file names belong to this file?
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
354
354
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None           # patchmeta currently being built, if any
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file header starts; flush any pending metadata
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunk content begins: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # trailing 6 octal digits encode the file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
398
398
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []   # pushed-back lines, served oldest first

    def push(self, line):
        # a None line is silently discarded
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # serve pushed-back lines before reading from the stream
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns an empty string (EOF)
        return iter(self.readline, '')
418
418
class abstractbackend(object):
    """Interface for the destinations a patch can be applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return a (data, (islink, isexec)) tuple for the target file.

        Data is None if the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to the target file fname and set its mode.

        mode is an (islink, isexec) tuple. If data is None, the file
        content is left unchanged. If the file is modified after being
        copied, copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Record rejected hunk lines for fname.

        failed is the number of hunks which could not be applied, total
        the overall number of hunks for this file. Default: do nothing.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
453
453
class fsbackend(abstractbackend):
    """Backend reading and writing files directly under a base directory."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # a symlink's "data" is its target
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file simply isn't executable; anything else is fatal
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
        # file vanished (or never existed): report as missing
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
            return
        self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target as <fname>.rej
        rejname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, rejname))
        rejfp = self.opener(rejname, 'w')
        rejfp.writelines(lines)
        rejfp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
502
502
class workingbackend(fsbackend):
    """Backend applying patches to a repository working directory while
    recording the resulting adds, removes and copies for the dirstate."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to marktouched() for rename
        # detection in close()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch a file that exists but is not tracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # flush accumulated state: record copies, forget removed files,
        # then let marktouched() classify everything else
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
546
546
class filestore(object):
    """Hold patched file contents in memory, spilling to a temporary
    directory once roughly maxsize bytes are buffered."""

    def __init__(self, maxsize=None):
        self.opener = None      # vfs over the spill directory, created lazily
        self.files = {}         # fname -> (tempname, mode, copied), spilled
        self.created = 0        # counter naming spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0           # bytes currently held in memory
        self.data = {}          # fname -> (data, mode, copied), in memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # still under budget: keep in memory
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        # (data, mode, copied) or (None, None, None) when unknown
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
583
583
class repobackend(abstractbackend):
    """Backend applying patches against a changectx, writing results into
    a filestore instead of the filesystem."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # only files present in the base context may be patched
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # unknown in this context: report as missing
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: carry over the existing content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # report every file this backend touched
        return self.changed | self.removed
625
625
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
630
630
631 class patchfile(object):
631 class patchfile(object):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
633 self.fname = gp.path
633 self.fname = gp.path
634 self.eolmode = eolmode
634 self.eolmode = eolmode
635 self.eol = None
635 self.eol = None
636 self.backend = backend
636 self.backend = backend
637 self.ui = ui
637 self.ui = ui
638 self.lines = []
638 self.lines = []
639 self.exists = False
639 self.exists = False
640 self.missing = True
640 self.missing = True
641 self.mode = gp.mode
641 self.mode = gp.mode
642 self.copysource = gp.oldpath
642 self.copysource = gp.oldpath
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
644 self.remove = gp.op == 'DELETE'
644 self.remove = gp.op == 'DELETE'
645 if self.copysource is None:
645 if self.copysource is None:
646 data, mode = backend.getfile(self.fname)
646 data, mode = backend.getfile(self.fname)
647 else:
647 else:
648 data, mode = store.getfile(self.copysource)[:2]
648 data, mode = store.getfile(self.copysource)[:2]
649 if data is not None:
649 if data is not None:
650 self.exists = self.copysource is None or backend.exists(self.fname)
650 self.exists = self.copysource is None or backend.exists(self.fname)
651 self.missing = False
651 self.missing = False
652 if data:
652 if data:
653 self.lines = mdiff.splitnewlines(data)
653 self.lines = mdiff.splitnewlines(data)
654 if self.mode is None:
654 if self.mode is None:
655 self.mode = mode
655 self.mode = mode
656 if self.lines:
656 if self.lines:
657 # Normalize line endings
657 # Normalize line endings
658 if self.lines[0].endswith('\r\n'):
658 if self.lines[0].endswith('\r\n'):
659 self.eol = '\r\n'
659 self.eol = '\r\n'
660 elif self.lines[0].endswith('\n'):
660 elif self.lines[0].endswith('\n'):
661 self.eol = '\n'
661 self.eol = '\n'
662 if eolmode != 'strict':
662 if eolmode != 'strict':
663 nlines = []
663 nlines = []
664 for l in self.lines:
664 for l in self.lines:
665 if l.endswith('\r\n'):
665 if l.endswith('\r\n'):
666 l = l[:-2] + '\n'
666 l = l[:-2] + '\n'
667 nlines.append(l)
667 nlines.append(l)
668 self.lines = nlines
668 self.lines = nlines
669 else:
669 else:
670 if self.create:
670 if self.create:
671 self.missing = False
671 self.missing = False
672 if self.mode is None:
672 if self.mode is None:
673 self.mode = (False, False)
673 self.mode = (False, False)
674 if self.missing:
674 if self.missing:
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
677 "current directory)\n"))
677 "current directory)\n"))
678
678
679 self.hash = {}
679 self.hash = {}
680 self.dirty = 0
680 self.dirty = 0
681 self.offset = 0
681 self.offset = 0
682 self.skew = 0
682 self.skew = 0
683 self.rej = []
683 self.rej = []
684 self.fileprinted = False
684 self.fileprinted = False
685 self.printfile(False)
685 self.printfile(False)
686 self.hunks = 0
686 self.hunks = 0
687
687
688 def writelines(self, fname, lines, mode):
688 def writelines(self, fname, lines, mode):
689 if self.eolmode == 'auto':
689 if self.eolmode == 'auto':
690 eol = self.eol
690 eol = self.eol
691 elif self.eolmode == 'crlf':
691 elif self.eolmode == 'crlf':
692 eol = '\r\n'
692 eol = '\r\n'
693 else:
693 else:
694 eol = '\n'
694 eol = '\n'
695
695
696 if self.eolmode != 'strict' and eol and eol != '\n':
696 if self.eolmode != 'strict' and eol and eol != '\n':
697 rawlines = []
697 rawlines = []
698 for l in lines:
698 for l in lines:
699 if l and l[-1] == '\n':
699 if l and l[-1] == '\n':
700 l = l[:-1] + eol
700 l = l[:-1] + eol
701 rawlines.append(l)
701 rawlines.append(l)
702 lines = rawlines
702 lines = rawlines
703
703
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
705
705
706 def printfile(self, warn):
706 def printfile(self, warn):
707 if self.fileprinted:
707 if self.fileprinted:
708 return
708 return
709 if warn or self.ui.verbose:
709 if warn or self.ui.verbose:
710 self.fileprinted = True
710 self.fileprinted = True
711 s = _("patching file %s\n") % self.fname
711 s = _("patching file %s\n") % self.fname
712 if warn:
712 if warn:
713 self.ui.warn(s)
713 self.ui.warn(s)
714 else:
714 else:
715 self.ui.note(s)
715 self.ui.note(s)
716
716
717
717
718 def findlines(self, l, linenum):
718 def findlines(self, l, linenum):
719 # looks through the hash and finds candidate lines. The
719 # looks through the hash and finds candidate lines. The
720 # result is a list of line numbers sorted based on distance
720 # result is a list of line numbers sorted based on distance
721 # from linenum
721 # from linenum
722
722
723 cand = self.hash.get(l, [])
723 cand = self.hash.get(l, [])
724 if len(cand) > 1:
724 if len(cand) > 1:
725 # resort our list of potentials forward then back.
725 # resort our list of potentials forward then back.
726 cand.sort(key=lambda x: abs(x - linenum))
726 cand.sort(key=lambda x: abs(x - linenum))
727 return cand
727 return cand
728
728
729 def write_rej(self):
729 def write_rej(self):
730 # our rejects are a little different from patch(1). This always
730 # our rejects are a little different from patch(1). This always
731 # creates rejects in the same form as the original patch. A file
731 # creates rejects in the same form as the original patch. A file
732 # header is inserted so that you can run the reject through patch again
732 # header is inserted so that you can run the reject through patch again
733 # without having to type the filename.
733 # without having to type the filename.
734 if not self.rej:
734 if not self.rej:
735 return
735 return
736 base = os.path.basename(self.fname)
736 base = os.path.basename(self.fname)
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
738 for x in self.rej:
738 for x in self.rej:
739 for l in x.hunk:
739 for l in x.hunk:
740 lines.append(l)
740 lines.append(l)
741 if l[-1:] != '\n':
741 if l[-1:] != '\n':
742 lines.append("\n\ No newline at end of file\n")
742 lines.append("\n\ No newline at end of file\n")
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
744
744
745 def apply(self, h):
745 def apply(self, h):
746 if not h.complete():
746 if not h.complete():
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
749 h.lenb))
749 h.lenb))
750
750
751 self.hunks += 1
751 self.hunks += 1
752
752
753 if self.missing:
753 if self.missing:
754 self.rej.append(h)
754 self.rej.append(h)
755 return -1
755 return -1
756
756
757 if self.exists and self.create:
757 if self.exists and self.create:
758 if self.copysource:
758 if self.copysource:
759 self.ui.warn(_("cannot create %s: destination already "
759 self.ui.warn(_("cannot create %s: destination already "
760 "exists\n") % self.fname)
760 "exists\n") % self.fname)
761 else:
761 else:
762 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.ui.warn(_("file %s already exists\n") % self.fname)
763 self.rej.append(h)
763 self.rej.append(h)
764 return -1
764 return -1
765
765
766 if isinstance(h, binhunk):
766 if isinstance(h, binhunk):
767 if self.remove:
767 if self.remove:
768 self.backend.unlink(self.fname)
768 self.backend.unlink(self.fname)
769 else:
769 else:
770 l = h.new(self.lines)
770 l = h.new(self.lines)
771 self.lines[:] = l
771 self.lines[:] = l
772 self.offset += len(l)
772 self.offset += len(l)
773 self.dirty = True
773 self.dirty = True
774 return 0
774 return 0
775
775
776 horig = h
776 horig = h
777 if (self.eolmode in ('crlf', 'lf')
777 if (self.eolmode in ('crlf', 'lf')
778 or self.eolmode == 'auto' and self.eol):
778 or self.eolmode == 'auto' and self.eol):
779 # If new eols are going to be normalized, then normalize
779 # If new eols are going to be normalized, then normalize
780 # hunk data before patching. Otherwise, preserve input
780 # hunk data before patching. Otherwise, preserve input
781 # line-endings.
781 # line-endings.
782 h = h.getnormalized()
782 h = h.getnormalized()
783
783
784 # fast case first, no offsets, no fuzz
784 # fast case first, no offsets, no fuzz
785 old, oldstart, new, newstart = h.fuzzit(0, False)
785 old, oldstart, new, newstart = h.fuzzit(0, False)
786 oldstart += self.offset
786 oldstart += self.offset
787 orig_start = oldstart
787 orig_start = oldstart
788 # if there's skew we want to emit the "(offset %d lines)" even
788 # if there's skew we want to emit the "(offset %d lines)" even
789 # when the hunk cleanly applies at start + skew, so skip the
789 # when the hunk cleanly applies at start + skew, so skip the
790 # fast case code
790 # fast case code
791 if (self.skew == 0 and
791 if (self.skew == 0 and
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
793 if self.remove:
793 if self.remove:
794 self.backend.unlink(self.fname)
794 self.backend.unlink(self.fname)
795 else:
795 else:
796 self.lines[oldstart:oldstart + len(old)] = new
796 self.lines[oldstart:oldstart + len(old)] = new
797 self.offset += len(new) - len(old)
797 self.offset += len(new) - len(old)
798 self.dirty = True
798 self.dirty = True
799 return 0
799 return 0
800
800
801 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
802 self.hash = {}
802 self.hash = {}
803 for x, s in enumerate(self.lines):
803 for x, s in enumerate(self.lines):
804 self.hash.setdefault(s, []).append(x)
804 self.hash.setdefault(s, []).append(x)
805
805
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
807 for toponly in [True, False]:
807 for toponly in [True, False]:
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
809 oldstart = oldstart + self.offset + self.skew
809 oldstart = oldstart + self.offset + self.skew
810 oldstart = min(oldstart, len(self.lines))
810 oldstart = min(oldstart, len(self.lines))
811 if old:
811 if old:
812 cand = self.findlines(old[0][1:], oldstart)
812 cand = self.findlines(old[0][1:], oldstart)
813 else:
813 else:
814 # Only adding lines with no or fuzzed context, just
814 # Only adding lines with no or fuzzed context, just
815 # take the skew in account
815 # take the skew in account
816 cand = [oldstart]
816 cand = [oldstart]
817
817
818 for l in cand:
818 for l in cand:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
820 self.lines[l : l + len(old)] = new
820 self.lines[l : l + len(old)] = new
821 self.offset += len(new) - len(old)
821 self.offset += len(new) - len(old)
822 self.skew = l - orig_start
822 self.skew = l - orig_start
823 self.dirty = True
823 self.dirty = True
824 offset = l - orig_start - fuzzlen
824 offset = l - orig_start - fuzzlen
825 if fuzzlen:
825 if fuzzlen:
826 msg = _("Hunk #%d succeeded at %d "
826 msg = _("Hunk #%d succeeded at %d "
827 "with fuzz %d "
827 "with fuzz %d "
828 "(offset %d lines).\n")
828 "(offset %d lines).\n")
829 self.printfile(True)
829 self.printfile(True)
830 self.ui.warn(msg %
830 self.ui.warn(msg %
831 (h.number, l + 1, fuzzlen, offset))
831 (h.number, l + 1, fuzzlen, offset))
832 else:
832 else:
833 msg = _("Hunk #%d succeeded at %d "
833 msg = _("Hunk #%d succeeded at %d "
834 "(offset %d lines).\n")
834 "(offset %d lines).\n")
835 self.ui.note(msg % (h.number, l + 1, offset))
835 self.ui.note(msg % (h.number, l + 1, offset))
836 return fuzzlen
836 return fuzzlen
837 self.printfile(True)
837 self.printfile(True)
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
839 self.rej.append(horig)
839 self.rej.append(horig)
840 return -1
840 return -1
841
841
842 def close(self):
842 def close(self):
843 if self.dirty:
843 if self.dirty:
844 self.writelines(self.fname, self.lines, self.mode)
844 self.writelines(self.fname, self.lines, self.mode)
845 self.write_rej()
845 self.write_rej()
846 return len(self.rej)
846 return len(self.rej)
847
847
848 class header(object):
848 class header(object):
849 """patch header
849 """patch header
850 """
850 """
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
853 allhunks_re = re.compile('(?:index|deleted file) ')
853 allhunks_re = re.compile('(?:index|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
856 newfile_re = re.compile('(?:new file)')
856 newfile_re = re.compile('(?:new file)')
857
857
858 def __init__(self, header):
858 def __init__(self, header):
859 self.header = header
859 self.header = header
860 self.hunks = []
860 self.hunks = []
861
861
862 def binary(self):
862 def binary(self):
863 return any(h.startswith('index ') for h in self.header)
863 return any(h.startswith('index ') for h in self.header)
864
864
865 def pretty(self, fp):
865 def pretty(self, fp):
866 for h in self.header:
866 for h in self.header:
867 if h.startswith('index '):
867 if h.startswith('index '):
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
869 break
869 break
870 if self.pretty_re.match(h):
870 if self.pretty_re.match(h):
871 fp.write(h)
871 fp.write(h)
872 if self.binary():
872 if self.binary():
873 fp.write(_('this is a binary file\n'))
873 fp.write(_('this is a binary file\n'))
874 break
874 break
875 if h.startswith('---'):
875 if h.startswith('---'):
876 fp.write(_('%d hunks, %d lines changed\n') %
876 fp.write(_('%d hunks, %d lines changed\n') %
877 (len(self.hunks),
877 (len(self.hunks),
878 sum([max(h.added, h.removed) for h in self.hunks])))
878 sum([max(h.added, h.removed) for h in self.hunks])))
879 break
879 break
880 fp.write(h)
880 fp.write(h)
881
881
882 def write(self, fp):
882 def write(self, fp):
883 fp.write(''.join(self.header))
883 fp.write(''.join(self.header))
884
884
885 def allhunks(self):
885 def allhunks(self):
886 return any(self.allhunks_re.match(h) for h in self.header)
886 return any(self.allhunks_re.match(h) for h in self.header)
887
887
888 def files(self):
888 def files(self):
889 match = self.diffgit_re.match(self.header[0])
889 match = self.diffgit_re.match(self.header[0])
890 if match:
890 if match:
891 fromfile, tofile = match.groups()
891 fromfile, tofile = match.groups()
892 if fromfile == tofile:
892 if fromfile == tofile:
893 return [fromfile]
893 return [fromfile]
894 return [fromfile, tofile]
894 return [fromfile, tofile]
895 else:
895 else:
896 return self.diff_re.match(self.header[0]).groups()
896 return self.diff_re.match(self.header[0]).groups()
897
897
898 def filename(self):
898 def filename(self):
899 return self.files()[-1]
899 return self.files()[-1]
900
900
901 def __repr__(self):
901 def __repr__(self):
902 return '<header %s>' % (' '.join(map(repr, self.files())))
902 return '<header %s>' % (' '.join(map(repr, self.files())))
903
903
904 def isnewfile(self):
904 def isnewfile(self):
905 return any(self.newfile_re.match(h) for h in self.header)
905 return any(self.newfile_re.match(h) for h in self.header)
906
906
907 def special(self):
907 def special(self):
908 # Special files are shown only at the header level and not at the hunk
908 # Special files are shown only at the header level and not at the hunk
909 # level for example a file that has been deleted is a special file.
909 # level for example a file that has been deleted is a special file.
910 # The user cannot change the content of the operation, in the case of
910 # The user cannot change the content of the operation, in the case of
911 # the deleted file he has to take the deletion or not take it, he
911 # the deleted file he has to take the deletion or not take it, he
912 # cannot take some of it.
912 # cannot take some of it.
913 # Newly added files are special if they are empty, they are not special
913 # Newly added files are special if they are empty, they are not special
914 # if they have some content as we want to be able to change it
914 # if they have some content as we want to be able to change it
915 nocontent = len(self.header) == 2
915 nocontent = len(self.header) == 2
916 emptynewfile = self.isnewfile() and nocontent
916 emptynewfile = self.isnewfile() and nocontent
917 return emptynewfile or \
917 return emptynewfile or \
918 any(self.special_re.match(h) for h in self.header)
918 any(self.special_re.match(h) for h in self.header)
919
919
920 class recordhunk(object):
920 class recordhunk(object):
921 """patch hunk
921 """patch hunk
922
922
923 XXX shouldn't we merge this with the other hunk class?
923 XXX shouldn't we merge this with the other hunk class?
924 """
924 """
925 maxcontext = 3
926
925
927 def __init__(self, header, fromline, toline, proc, before, hunk, after):
926 def __init__(self, header, fromline, toline, proc, before, hunk, after,
928 def trimcontext(number, lines):
927 maxcontext=None):
929 delta = len(lines) - self.maxcontext
928 def trimcontext(lines, reverse=False):
930 if False and delta > 0:
929 if maxcontext is not None:
931 return number + delta, lines[:self.maxcontext]
930 delta = len(lines) - maxcontext
932 return number, lines
931 if delta > 0:
932 if reverse:
933 return delta, lines[delta:]
934 else:
935 return delta, lines[:maxcontext]
936 return 0, lines
933
937
934 self.header = header
938 self.header = header
935 self.fromline, self.before = trimcontext(fromline, before)
939 trimedbefore, self.before = trimcontext(before, True)
936 self.toline, self.after = trimcontext(toline, after)
940 self.fromline = fromline + trimedbefore
941 self.toline = toline + trimedbefore
942 _trimedafter, self.after = trimcontext(after, False)
937 self.proc = proc
943 self.proc = proc
938 self.hunk = hunk
944 self.hunk = hunk
939 self.added, self.removed = self.countchanges(self.hunk)
945 self.added, self.removed = self.countchanges(self.hunk)
940
946
941 def __eq__(self, v):
947 def __eq__(self, v):
942 if not isinstance(v, recordhunk):
948 if not isinstance(v, recordhunk):
943 return False
949 return False
944
950
945 return ((v.hunk == self.hunk) and
951 return ((v.hunk == self.hunk) and
946 (v.proc == self.proc) and
952 (v.proc == self.proc) and
947 (self.fromline == v.fromline) and
953 (self.fromline == v.fromline) and
948 (self.header.files() == v.header.files()))
954 (self.header.files() == v.header.files()))
949
955
950 def __hash__(self):
956 def __hash__(self):
951 return hash((tuple(self.hunk),
957 return hash((tuple(self.hunk),
952 tuple(self.header.files()),
958 tuple(self.header.files()),
953 self.fromline,
959 self.fromline,
954 self.proc))
960 self.proc))
955
961
956 def countchanges(self, hunk):
962 def countchanges(self, hunk):
957 """hunk -> (n+,n-)"""
963 """hunk -> (n+,n-)"""
958 add = len([h for h in hunk if h[0] == '+'])
964 add = len([h for h in hunk if h[0] == '+'])
959 rem = len([h for h in hunk if h[0] == '-'])
965 rem = len([h for h in hunk if h[0] == '-'])
960 return add, rem
966 return add, rem
961
967
962 def reversehunk(self):
968 def reversehunk(self):
963 """return another recordhunk which is the reverse of the hunk
969 """return another recordhunk which is the reverse of the hunk
964
970
965 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
971 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
966 that, swap fromline/toline and +/- signs while keep other things
972 that, swap fromline/toline and +/- signs while keep other things
967 unchanged.
973 unchanged.
968 """
974 """
969 m = {'+': '-', '-': '+'}
975 m = {'+': '-', '-': '+'}
970 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
976 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
971 return recordhunk(self.header, self.toline, self.fromline, self.proc,
977 return recordhunk(self.header, self.toline, self.fromline, self.proc,
972 self.before, hunk, self.after)
978 self.before, hunk, self.after)
973
979
974 def write(self, fp):
980 def write(self, fp):
975 delta = len(self.before) + len(self.after)
981 delta = len(self.before) + len(self.after)
976 if self.after and self.after[-1] == '\\ No newline at end of file\n':
982 if self.after and self.after[-1] == '\\ No newline at end of file\n':
977 delta -= 1
983 delta -= 1
978 fromlen = delta + self.removed
984 fromlen = delta + self.removed
979 tolen = delta + self.added
985 tolen = delta + self.added
980 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
986 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
981 (self.fromline, fromlen, self.toline, tolen,
987 (self.fromline, fromlen, self.toline, tolen,
982 self.proc and (' ' + self.proc)))
988 self.proc and (' ' + self.proc)))
983 fp.write(''.join(self.before + self.hunk + self.after))
989 fp.write(''.join(self.before + self.hunk + self.after))
984
990
985 pretty = write
991 pretty = write
986
992
987 def filename(self):
993 def filename(self):
988 return self.header.filename()
994 return self.header.filename()
989
995
990 def __repr__(self):
996 def __repr__(self):
991 return '<hunk %r@%d>' % (self.filename(), self.fromline)
997 return '<hunk %r@%d>' % (self.filename(), self.fromline)
992
998
993 def filterpatch(ui, headers, operation=None):
999 def filterpatch(ui, headers, operation=None):
994 """Interactively filter patch chunks into applied-only chunks"""
1000 """Interactively filter patch chunks into applied-only chunks"""
995 if operation is None:
1001 if operation is None:
996 operation = 'record'
1002 operation = 'record'
997 messages = {
1003 messages = {
998 'multiple': {
1004 'multiple': {
999 'discard': _("discard change %d/%d to '%s'?"),
1005 'discard': _("discard change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1007 'revert': _("revert change %d/%d to '%s'?"),
1002 }[operation],
1008 }[operation],
1003 'single': {
1009 'single': {
1004 'discard': _("discard this change to '%s'?"),
1010 'discard': _("discard this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1012 'revert': _("revert this change to '%s'?"),
1007 }[operation],
1013 }[operation],
1008 'help': {
1014 'help': {
1009 'discard': _('[Ynesfdaq?]'
1015 'discard': _('[Ynesfdaq?]'
1010 '$$ &Yes, discard this change'
1016 '$$ &Yes, discard this change'
1011 '$$ &No, skip this change'
1017 '$$ &No, skip this change'
1012 '$$ &Edit this change manually'
1018 '$$ &Edit this change manually'
1013 '$$ &Skip remaining changes to this file'
1019 '$$ &Skip remaining changes to this file'
1014 '$$ Discard remaining changes to this &file'
1020 '$$ Discard remaining changes to this &file'
1015 '$$ &Done, skip remaining changes and files'
1021 '$$ &Done, skip remaining changes and files'
1016 '$$ Discard &all changes to all remaining files'
1022 '$$ Discard &all changes to all remaining files'
1017 '$$ &Quit, discarding no changes'
1023 '$$ &Quit, discarding no changes'
1018 '$$ &? (display help)'),
1024 '$$ &? (display help)'),
1019 'record': _('[Ynesfdaq?]'
1025 'record': _('[Ynesfdaq?]'
1020 '$$ &Yes, record this change'
1026 '$$ &Yes, record this change'
1021 '$$ &No, skip this change'
1027 '$$ &No, skip this change'
1022 '$$ &Edit this change manually'
1028 '$$ &Edit this change manually'
1023 '$$ &Skip remaining changes to this file'
1029 '$$ &Skip remaining changes to this file'
1024 '$$ Record remaining changes to this &file'
1030 '$$ Record remaining changes to this &file'
1025 '$$ &Done, skip remaining changes and files'
1031 '$$ &Done, skip remaining changes and files'
1026 '$$ Record &all changes to all remaining files'
1032 '$$ Record &all changes to all remaining files'
1027 '$$ &Quit, recording no changes'
1033 '$$ &Quit, recording no changes'
1028 '$$ &? (display help)'),
1034 '$$ &? (display help)'),
1029 'revert': _('[Ynesfdaq?]'
1035 'revert': _('[Ynesfdaq?]'
1030 '$$ &Yes, revert this change'
1036 '$$ &Yes, revert this change'
1031 '$$ &No, skip this change'
1037 '$$ &No, skip this change'
1032 '$$ &Edit this change manually'
1038 '$$ &Edit this change manually'
1033 '$$ &Skip remaining changes to this file'
1039 '$$ &Skip remaining changes to this file'
1034 '$$ Revert remaining changes to this &file'
1040 '$$ Revert remaining changes to this &file'
1035 '$$ &Done, skip remaining changes and files'
1041 '$$ &Done, skip remaining changes and files'
1036 '$$ Revert &all changes to all remaining files'
1042 '$$ Revert &all changes to all remaining files'
1037 '$$ &Quit, reverting no changes'
1043 '$$ &Quit, reverting no changes'
1038 '$$ &? (display help)')
1044 '$$ &? (display help)')
1039 }[operation]
1045 }[operation]
1040 }
1046 }
1041
1047
1042 def prompt(skipfile, skipall, query, chunk):
1048 def prompt(skipfile, skipall, query, chunk):
1043 """prompt query, and process base inputs
1049 """prompt query, and process base inputs
1044
1050
1045 - y/n for the rest of file
1051 - y/n for the rest of file
1046 - y/n for the rest
1052 - y/n for the rest
1047 - ? (help)
1053 - ? (help)
1048 - q (quit)
1054 - q (quit)
1049
1055
1050 Return True/False and possibly updated skipfile and skipall.
1056 Return True/False and possibly updated skipfile and skipall.
1051 """
1057 """
1052 newpatches = None
1058 newpatches = None
1053 if skipall is not None:
1059 if skipall is not None:
1054 return skipall, skipfile, skipall, newpatches
1060 return skipall, skipfile, skipall, newpatches
1055 if skipfile is not None:
1061 if skipfile is not None:
1056 return skipfile, skipfile, skipall, newpatches
1062 return skipfile, skipfile, skipall, newpatches
1057 while True:
1063 while True:
1058 resps = messages['help']
1064 resps = messages['help']
1059 r = ui.promptchoice("%s %s" % (query, resps))
1065 r = ui.promptchoice("%s %s" % (query, resps))
1060 ui.write("\n")
1066 ui.write("\n")
1061 if r == 8: # ?
1067 if r == 8: # ?
1062 for c, t in ui.extractchoices(resps)[1]:
1068 for c, t in ui.extractchoices(resps)[1]:
1063 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1064 continue
1070 continue
1065 elif r == 0: # yes
1071 elif r == 0: # yes
1066 ret = True
1072 ret = True
1067 elif r == 1: # no
1073 elif r == 1: # no
1068 ret = False
1074 ret = False
1069 elif r == 2: # Edit patch
1075 elif r == 2: # Edit patch
1070 if chunk is None:
1076 if chunk is None:
1071 ui.write(_('cannot edit patch for whole file'))
1077 ui.write(_('cannot edit patch for whole file'))
1072 ui.write("\n")
1078 ui.write("\n")
1073 continue
1079 continue
1074 if chunk.header.binary():
1080 if chunk.header.binary():
1075 ui.write(_('cannot edit patch for binary file'))
1081 ui.write(_('cannot edit patch for binary file'))
1076 ui.write("\n")
1082 ui.write("\n")
1077 continue
1083 continue
1078 # Patch comment based on the Git one (based on comment at end of
1084 # Patch comment based on the Git one (based on comment at end of
1079 # https://mercurial-scm.org/wiki/RecordExtension)
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1080 phelp = '---' + _("""
1086 phelp = '---' + _("""
1081 To remove '-' lines, make them ' ' lines (context).
1087 To remove '-' lines, make them ' ' lines (context).
1082 To remove '+' lines, delete them.
1088 To remove '+' lines, delete them.
1083 Lines starting with # will be removed from the patch.
1089 Lines starting with # will be removed from the patch.
1084
1090
1085 If the patch applies cleanly, the edited hunk will immediately be
1091 If the patch applies cleanly, the edited hunk will immediately be
1086 added to the record list. If it does not apply cleanly, a rejects
1092 added to the record list. If it does not apply cleanly, a rejects
1087 file will be generated: you can use that when you try again. If
1093 file will be generated: you can use that when you try again. If
1088 all lines of the hunk are removed, then the edit is aborted and
1094 all lines of the hunk are removed, then the edit is aborted and
1089 the hunk is left unchanged.
1095 the hunk is left unchanged.
1090 """)
1096 """)
1091 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1092 suffix=".diff", text=True)
1098 suffix=".diff", text=True)
1093 ncpatchfp = None
1099 ncpatchfp = None
1094 try:
1100 try:
1095 # Write the initial patch
1101 # Write the initial patch
1096 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1097 chunk.header.write(f)
1103 chunk.header.write(f)
1098 chunk.write(f)
1104 chunk.write(f)
1099 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1100 f.close()
1106 f.close()
1101 # Start the editor and wait for it to complete
1107 # Start the editor and wait for it to complete
1102 editor = ui.geteditor()
1108 editor = ui.geteditor()
1103 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1104 environ={'HGUSER': ui.username()},
1110 environ={'HGUSER': ui.username()},
1105 blockedtag='filterpatch')
1111 blockedtag='filterpatch')
1106 if ret != 0:
1112 if ret != 0:
1107 ui.warn(_("editor exited with exit code %d\n") % ret)
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1108 continue
1114 continue
1109 # Remove comment lines
1115 # Remove comment lines
1110 patchfp = open(patchfn)
1116 patchfp = open(patchfn)
1111 ncpatchfp = stringio()
1117 ncpatchfp = stringio()
1112 for line in util.iterfile(patchfp):
1118 for line in util.iterfile(patchfp):
1113 if not line.startswith('#'):
1119 if not line.startswith('#'):
1114 ncpatchfp.write(line)
1120 ncpatchfp.write(line)
1115 patchfp.close()
1121 patchfp.close()
1116 ncpatchfp.seek(0)
1122 ncpatchfp.seek(0)
1117 newpatches = parsepatch(ncpatchfp)
1123 newpatches = parsepatch(ncpatchfp)
1118 finally:
1124 finally:
1119 os.unlink(patchfn)
1125 os.unlink(patchfn)
1120 del ncpatchfp
1126 del ncpatchfp
1121 # Signal that the chunk shouldn't be applied as-is, but
1127 # Signal that the chunk shouldn't be applied as-is, but
1122 # provide the new patch to be used instead.
1128 # provide the new patch to be used instead.
1123 ret = False
1129 ret = False
1124 elif r == 3: # Skip
1130 elif r == 3: # Skip
1125 ret = skipfile = False
1131 ret = skipfile = False
1126 elif r == 4: # file (Record remaining)
1132 elif r == 4: # file (Record remaining)
1127 ret = skipfile = True
1133 ret = skipfile = True
1128 elif r == 5: # done, skip remaining
1134 elif r == 5: # done, skip remaining
1129 ret = skipall = False
1135 ret = skipall = False
1130 elif r == 6: # all
1136 elif r == 6: # all
1131 ret = skipall = True
1137 ret = skipall = True
1132 elif r == 7: # quit
1138 elif r == 7: # quit
1133 raise error.Abort(_('user quit'))
1139 raise error.Abort(_('user quit'))
1134 return ret, skipfile, skipall, newpatches
1140 return ret, skipfile, skipall, newpatches
1135
1141
1136 seen = set()
1142 seen = set()
1137 applied = {} # 'filename' -> [] of chunks
1143 applied = {} # 'filename' -> [] of chunks
1138 skipfile, skipall = None, None
1144 skipfile, skipall = None, None
1139 pos, total = 1, sum(len(h.hunks) for h in headers)
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1140 for h in headers:
1146 for h in headers:
1141 pos += len(h.hunks)
1147 pos += len(h.hunks)
1142 skipfile = None
1148 skipfile = None
1143 fixoffset = 0
1149 fixoffset = 0
1144 hdr = ''.join(h.header)
1150 hdr = ''.join(h.header)
1145 if hdr in seen:
1151 if hdr in seen:
1146 continue
1152 continue
1147 seen.add(hdr)
1153 seen.add(hdr)
1148 if skipall is None:
1154 if skipall is None:
1149 h.pretty(ui)
1155 h.pretty(ui)
1150 msg = (_('examine changes to %s?') %
1156 msg = (_('examine changes to %s?') %
1151 _(' and ').join("'%s'" % f for f in h.files()))
1157 _(' and ').join("'%s'" % f for f in h.files()))
1152 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1153 if not r:
1159 if not r:
1154 continue
1160 continue
1155 applied[h.filename()] = [h]
1161 applied[h.filename()] = [h]
1156 if h.allhunks():
1162 if h.allhunks():
1157 applied[h.filename()] += h.hunks
1163 applied[h.filename()] += h.hunks
1158 continue
1164 continue
1159 for i, chunk in enumerate(h.hunks):
1165 for i, chunk in enumerate(h.hunks):
1160 if skipfile is None and skipall is None:
1166 if skipfile is None and skipall is None:
1161 chunk.pretty(ui)
1167 chunk.pretty(ui)
1162 if total == 1:
1168 if total == 1:
1163 msg = messages['single'] % chunk.filename()
1169 msg = messages['single'] % chunk.filename()
1164 else:
1170 else:
1165 idx = pos - len(h.hunks) + i
1171 idx = pos - len(h.hunks) + i
1166 msg = messages['multiple'] % (idx, total, chunk.filename())
1172 msg = messages['multiple'] % (idx, total, chunk.filename())
1167 r, skipfile, skipall, newpatches = prompt(skipfile,
1173 r, skipfile, skipall, newpatches = prompt(skipfile,
1168 skipall, msg, chunk)
1174 skipall, msg, chunk)
1169 if r:
1175 if r:
1170 if fixoffset:
1176 if fixoffset:
1171 chunk = copy.copy(chunk)
1177 chunk = copy.copy(chunk)
1172 chunk.toline += fixoffset
1178 chunk.toline += fixoffset
1173 applied[chunk.filename()].append(chunk)
1179 applied[chunk.filename()].append(chunk)
1174 elif newpatches is not None:
1180 elif newpatches is not None:
1175 for newpatch in newpatches:
1181 for newpatch in newpatches:
1176 for newhunk in newpatch.hunks:
1182 for newhunk in newpatch.hunks:
1177 if fixoffset:
1183 if fixoffset:
1178 newhunk.toline += fixoffset
1184 newhunk.toline += fixoffset
1179 applied[newhunk.filename()].append(newhunk)
1185 applied[newhunk.filename()].append(newhunk)
1180 else:
1186 else:
1181 fixoffset += chunk.removed - chunk.added
1187 fixoffset += chunk.removed - chunk.added
1182 return (sum([h for h in applied.itervalues()
1188 return (sum([h for h in applied.itervalues()
1183 if h[0].special() or len(h) > 1], []), {})
1189 if h[0].special() or len(h) > 1], []), {})
1184 class hunk(object):
1190 class hunk(object):
1185 def __init__(self, desc, num, lr, context):
1191 def __init__(self, desc, num, lr, context):
1186 self.number = num
1192 self.number = num
1187 self.desc = desc
1193 self.desc = desc
1188 self.hunk = [desc]
1194 self.hunk = [desc]
1189 self.a = []
1195 self.a = []
1190 self.b = []
1196 self.b = []
1191 self.starta = self.lena = None
1197 self.starta = self.lena = None
1192 self.startb = self.lenb = None
1198 self.startb = self.lenb = None
1193 if lr is not None:
1199 if lr is not None:
1194 if context:
1200 if context:
1195 self.read_context_hunk(lr)
1201 self.read_context_hunk(lr)
1196 else:
1202 else:
1197 self.read_unified_hunk(lr)
1203 self.read_unified_hunk(lr)
1198
1204
1199 def getnormalized(self):
1205 def getnormalized(self):
1200 """Return a copy with line endings normalized to LF."""
1206 """Return a copy with line endings normalized to LF."""
1201
1207
1202 def normalize(lines):
1208 def normalize(lines):
1203 nlines = []
1209 nlines = []
1204 for line in lines:
1210 for line in lines:
1205 if line.endswith('\r\n'):
1211 if line.endswith('\r\n'):
1206 line = line[:-2] + '\n'
1212 line = line[:-2] + '\n'
1207 nlines.append(line)
1213 nlines.append(line)
1208 return nlines
1214 return nlines
1209
1215
1210 # Dummy object, it is rebuilt manually
1216 # Dummy object, it is rebuilt manually
1211 nh = hunk(self.desc, self.number, None, None)
1217 nh = hunk(self.desc, self.number, None, None)
1212 nh.number = self.number
1218 nh.number = self.number
1213 nh.desc = self.desc
1219 nh.desc = self.desc
1214 nh.hunk = self.hunk
1220 nh.hunk = self.hunk
1215 nh.a = normalize(self.a)
1221 nh.a = normalize(self.a)
1216 nh.b = normalize(self.b)
1222 nh.b = normalize(self.b)
1217 nh.starta = self.starta
1223 nh.starta = self.starta
1218 nh.startb = self.startb
1224 nh.startb = self.startb
1219 nh.lena = self.lena
1225 nh.lena = self.lena
1220 nh.lenb = self.lenb
1226 nh.lenb = self.lenb
1221 return nh
1227 return nh
1222
1228
1223 def read_unified_hunk(self, lr):
1229 def read_unified_hunk(self, lr):
1224 m = unidesc.match(self.desc)
1230 m = unidesc.match(self.desc)
1225 if not m:
1231 if not m:
1226 raise PatchError(_("bad hunk #%d") % self.number)
1232 raise PatchError(_("bad hunk #%d") % self.number)
1227 self.starta, self.lena, self.startb, self.lenb = m.groups()
1233 self.starta, self.lena, self.startb, self.lenb = m.groups()
1228 if self.lena is None:
1234 if self.lena is None:
1229 self.lena = 1
1235 self.lena = 1
1230 else:
1236 else:
1231 self.lena = int(self.lena)
1237 self.lena = int(self.lena)
1232 if self.lenb is None:
1238 if self.lenb is None:
1233 self.lenb = 1
1239 self.lenb = 1
1234 else:
1240 else:
1235 self.lenb = int(self.lenb)
1241 self.lenb = int(self.lenb)
1236 self.starta = int(self.starta)
1242 self.starta = int(self.starta)
1237 self.startb = int(self.startb)
1243 self.startb = int(self.startb)
1238 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1244 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1239 self.b)
1245 self.b)
1240 # if we hit eof before finishing out the hunk, the last line will
1246 # if we hit eof before finishing out the hunk, the last line will
1241 # be zero length. Lets try to fix it up.
1247 # be zero length. Lets try to fix it up.
1242 while len(self.hunk[-1]) == 0:
1248 while len(self.hunk[-1]) == 0:
1243 del self.hunk[-1]
1249 del self.hunk[-1]
1244 del self.a[-1]
1250 del self.a[-1]
1245 del self.b[-1]
1251 del self.b[-1]
1246 self.lena -= 1
1252 self.lena -= 1
1247 self.lenb -= 1
1253 self.lenb -= 1
1248 self._fixnewline(lr)
1254 self._fixnewline(lr)
1249
1255
1250 def read_context_hunk(self, lr):
1256 def read_context_hunk(self, lr):
1251 self.desc = lr.readline()
1257 self.desc = lr.readline()
1252 m = contextdesc.match(self.desc)
1258 m = contextdesc.match(self.desc)
1253 if not m:
1259 if not m:
1254 raise PatchError(_("bad hunk #%d") % self.number)
1260 raise PatchError(_("bad hunk #%d") % self.number)
1255 self.starta, aend = m.groups()
1261 self.starta, aend = m.groups()
1256 self.starta = int(self.starta)
1262 self.starta = int(self.starta)
1257 if aend is None:
1263 if aend is None:
1258 aend = self.starta
1264 aend = self.starta
1259 self.lena = int(aend) - self.starta
1265 self.lena = int(aend) - self.starta
1260 if self.starta:
1266 if self.starta:
1261 self.lena += 1
1267 self.lena += 1
1262 for x in xrange(self.lena):
1268 for x in xrange(self.lena):
1263 l = lr.readline()
1269 l = lr.readline()
1264 if l.startswith('---'):
1270 if l.startswith('---'):
1265 # lines addition, old block is empty
1271 # lines addition, old block is empty
1266 lr.push(l)
1272 lr.push(l)
1267 break
1273 break
1268 s = l[2:]
1274 s = l[2:]
1269 if l.startswith('- ') or l.startswith('! '):
1275 if l.startswith('- ') or l.startswith('! '):
1270 u = '-' + s
1276 u = '-' + s
1271 elif l.startswith(' '):
1277 elif l.startswith(' '):
1272 u = ' ' + s
1278 u = ' ' + s
1273 else:
1279 else:
1274 raise PatchError(_("bad hunk #%d old text line %d") %
1280 raise PatchError(_("bad hunk #%d old text line %d") %
1275 (self.number, x))
1281 (self.number, x))
1276 self.a.append(u)
1282 self.a.append(u)
1277 self.hunk.append(u)
1283 self.hunk.append(u)
1278
1284
1279 l = lr.readline()
1285 l = lr.readline()
1280 if l.startswith('\ '):
1286 if l.startswith('\ '):
1281 s = self.a[-1][:-1]
1287 s = self.a[-1][:-1]
1282 self.a[-1] = s
1288 self.a[-1] = s
1283 self.hunk[-1] = s
1289 self.hunk[-1] = s
1284 l = lr.readline()
1290 l = lr.readline()
1285 m = contextdesc.match(l)
1291 m = contextdesc.match(l)
1286 if not m:
1292 if not m:
1287 raise PatchError(_("bad hunk #%d") % self.number)
1293 raise PatchError(_("bad hunk #%d") % self.number)
1288 self.startb, bend = m.groups()
1294 self.startb, bend = m.groups()
1289 self.startb = int(self.startb)
1295 self.startb = int(self.startb)
1290 if bend is None:
1296 if bend is None:
1291 bend = self.startb
1297 bend = self.startb
1292 self.lenb = int(bend) - self.startb
1298 self.lenb = int(bend) - self.startb
1293 if self.startb:
1299 if self.startb:
1294 self.lenb += 1
1300 self.lenb += 1
1295 hunki = 1
1301 hunki = 1
1296 for x in xrange(self.lenb):
1302 for x in xrange(self.lenb):
1297 l = lr.readline()
1303 l = lr.readline()
1298 if l.startswith('\ '):
1304 if l.startswith('\ '):
1299 # XXX: the only way to hit this is with an invalid line range.
1305 # XXX: the only way to hit this is with an invalid line range.
1300 # The no-eol marker is not counted in the line range, but I
1306 # The no-eol marker is not counted in the line range, but I
1301 # guess there are diff(1) out there which behave differently.
1307 # guess there are diff(1) out there which behave differently.
1302 s = self.b[-1][:-1]
1308 s = self.b[-1][:-1]
1303 self.b[-1] = s
1309 self.b[-1] = s
1304 self.hunk[hunki - 1] = s
1310 self.hunk[hunki - 1] = s
1305 continue
1311 continue
1306 if not l:
1312 if not l:
1307 # line deletions, new block is empty and we hit EOF
1313 # line deletions, new block is empty and we hit EOF
1308 lr.push(l)
1314 lr.push(l)
1309 break
1315 break
1310 s = l[2:]
1316 s = l[2:]
1311 if l.startswith('+ ') or l.startswith('! '):
1317 if l.startswith('+ ') or l.startswith('! '):
1312 u = '+' + s
1318 u = '+' + s
1313 elif l.startswith(' '):
1319 elif l.startswith(' '):
1314 u = ' ' + s
1320 u = ' ' + s
1315 elif len(self.b) == 0:
1321 elif len(self.b) == 0:
1316 # line deletions, new block is empty
1322 # line deletions, new block is empty
1317 lr.push(l)
1323 lr.push(l)
1318 break
1324 break
1319 else:
1325 else:
1320 raise PatchError(_("bad hunk #%d old text line %d") %
1326 raise PatchError(_("bad hunk #%d old text line %d") %
1321 (self.number, x))
1327 (self.number, x))
1322 self.b.append(s)
1328 self.b.append(s)
1323 while True:
1329 while True:
1324 if hunki >= len(self.hunk):
1330 if hunki >= len(self.hunk):
1325 h = ""
1331 h = ""
1326 else:
1332 else:
1327 h = self.hunk[hunki]
1333 h = self.hunk[hunki]
1328 hunki += 1
1334 hunki += 1
1329 if h == u:
1335 if h == u:
1330 break
1336 break
1331 elif h.startswith('-'):
1337 elif h.startswith('-'):
1332 continue
1338 continue
1333 else:
1339 else:
1334 self.hunk.insert(hunki - 1, u)
1340 self.hunk.insert(hunki - 1, u)
1335 break
1341 break
1336
1342
1337 if not self.a:
1343 if not self.a:
1338 # this happens when lines were only added to the hunk
1344 # this happens when lines were only added to the hunk
1339 for x in self.hunk:
1345 for x in self.hunk:
1340 if x.startswith('-') or x.startswith(' '):
1346 if x.startswith('-') or x.startswith(' '):
1341 self.a.append(x)
1347 self.a.append(x)
1342 if not self.b:
1348 if not self.b:
1343 # this happens when lines were only deleted from the hunk
1349 # this happens when lines were only deleted from the hunk
1344 for x in self.hunk:
1350 for x in self.hunk:
1345 if x.startswith('+') or x.startswith(' '):
1351 if x.startswith('+') or x.startswith(' '):
1346 self.b.append(x[1:])
1352 self.b.append(x[1:])
1347 # @@ -start,len +start,len @@
1353 # @@ -start,len +start,len @@
1348 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1354 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1349 self.startb, self.lenb)
1355 self.startb, self.lenb)
1350 self.hunk[0] = self.desc
1356 self.hunk[0] = self.desc
1351 self._fixnewline(lr)
1357 self._fixnewline(lr)
1352
1358
1353 def _fixnewline(self, lr):
1359 def _fixnewline(self, lr):
1354 l = lr.readline()
1360 l = lr.readline()
1355 if l.startswith('\ '):
1361 if l.startswith('\ '):
1356 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1362 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1357 else:
1363 else:
1358 lr.push(l)
1364 lr.push(l)
1359
1365
1360 def complete(self):
1366 def complete(self):
1361 return len(self.a) == self.lena and len(self.b) == self.lenb
1367 return len(self.a) == self.lena and len(self.b) == self.lenb
1362
1368
1363 def _fuzzit(self, old, new, fuzz, toponly):
1369 def _fuzzit(self, old, new, fuzz, toponly):
1364 # this removes context lines from the top and bottom of list 'l'. It
1370 # this removes context lines from the top and bottom of list 'l'. It
1365 # checks the hunk to make sure only context lines are removed, and then
1371 # checks the hunk to make sure only context lines are removed, and then
1366 # returns a new shortened list of lines.
1372 # returns a new shortened list of lines.
1367 fuzz = min(fuzz, len(old))
1373 fuzz = min(fuzz, len(old))
1368 if fuzz:
1374 if fuzz:
1369 top = 0
1375 top = 0
1370 bot = 0
1376 bot = 0
1371 hlen = len(self.hunk)
1377 hlen = len(self.hunk)
1372 for x in xrange(hlen - 1):
1378 for x in xrange(hlen - 1):
1373 # the hunk starts with the @@ line, so use x+1
1379 # the hunk starts with the @@ line, so use x+1
1374 if self.hunk[x + 1][0] == ' ':
1380 if self.hunk[x + 1][0] == ' ':
1375 top += 1
1381 top += 1
1376 else:
1382 else:
1377 break
1383 break
1378 if not toponly:
1384 if not toponly:
1379 for x in xrange(hlen - 1):
1385 for x in xrange(hlen - 1):
1380 if self.hunk[hlen - bot - 1][0] == ' ':
1386 if self.hunk[hlen - bot - 1][0] == ' ':
1381 bot += 1
1387 bot += 1
1382 else:
1388 else:
1383 break
1389 break
1384
1390
1385 bot = min(fuzz, bot)
1391 bot = min(fuzz, bot)
1386 top = min(fuzz, top)
1392 top = min(fuzz, top)
1387 return old[top:len(old) - bot], new[top:len(new) - bot], top
1393 return old[top:len(old) - bot], new[top:len(new) - bot], top
1388 return old, new, 0
1394 return old, new, 0
1389
1395
1390 def fuzzit(self, fuzz, toponly):
1396 def fuzzit(self, fuzz, toponly):
1391 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1397 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1392 oldstart = self.starta + top
1398 oldstart = self.starta + top
1393 newstart = self.startb + top
1399 newstart = self.startb + top
1394 # zero length hunk ranges already have their start decremented
1400 # zero length hunk ranges already have their start decremented
1395 if self.lena and oldstart > 0:
1401 if self.lena and oldstart > 0:
1396 oldstart -= 1
1402 oldstart -= 1
1397 if self.lenb and newstart > 0:
1403 if self.lenb and newstart > 0:
1398 newstart -= 1
1404 newstart -= 1
1399 return old, oldstart, new, newstart
1405 return old, oldstart, new, newstart
1400
1406
1401 class binhunk(object):
1407 class binhunk(object):
1402 'A binary patch file.'
1408 'A binary patch file.'
1403 def __init__(self, lr, fname):
1409 def __init__(self, lr, fname):
1404 self.text = None
1410 self.text = None
1405 self.delta = False
1411 self.delta = False
1406 self.hunk = ['GIT binary patch\n']
1412 self.hunk = ['GIT binary patch\n']
1407 self._fname = fname
1413 self._fname = fname
1408 self._read(lr)
1414 self._read(lr)
1409
1415
1410 def complete(self):
1416 def complete(self):
1411 return self.text is not None
1417 return self.text is not None
1412
1418
1413 def new(self, lines):
1419 def new(self, lines):
1414 if self.delta:
1420 if self.delta:
1415 return [applybindelta(self.text, ''.join(lines))]
1421 return [applybindelta(self.text, ''.join(lines))]
1416 return [self.text]
1422 return [self.text]
1417
1423
1418 def _read(self, lr):
1424 def _read(self, lr):
1419 def getline(lr, hunk):
1425 def getline(lr, hunk):
1420 l = lr.readline()
1426 l = lr.readline()
1421 hunk.append(l)
1427 hunk.append(l)
1422 return l.rstrip('\r\n')
1428 return l.rstrip('\r\n')
1423
1429
1424 size = 0
1430 size = 0
1425 while True:
1431 while True:
1426 line = getline(lr, self.hunk)
1432 line = getline(lr, self.hunk)
1427 if not line:
1433 if not line:
1428 raise PatchError(_('could not extract "%s" binary data')
1434 raise PatchError(_('could not extract "%s" binary data')
1429 % self._fname)
1435 % self._fname)
1430 if line.startswith('literal '):
1436 if line.startswith('literal '):
1431 size = int(line[8:].rstrip())
1437 size = int(line[8:].rstrip())
1432 break
1438 break
1433 if line.startswith('delta '):
1439 if line.startswith('delta '):
1434 size = int(line[6:].rstrip())
1440 size = int(line[6:].rstrip())
1435 self.delta = True
1441 self.delta = True
1436 break
1442 break
1437 dec = []
1443 dec = []
1438 line = getline(lr, self.hunk)
1444 line = getline(lr, self.hunk)
1439 while len(line) > 1:
1445 while len(line) > 1:
1440 l = line[0]
1446 l = line[0]
1441 if l <= 'Z' and l >= 'A':
1447 if l <= 'Z' and l >= 'A':
1442 l = ord(l) - ord('A') + 1
1448 l = ord(l) - ord('A') + 1
1443 else:
1449 else:
1444 l = ord(l) - ord('a') + 27
1450 l = ord(l) - ord('a') + 27
1445 try:
1451 try:
1446 dec.append(util.b85decode(line[1:])[:l])
1452 dec.append(util.b85decode(line[1:])[:l])
1447 except ValueError as e:
1453 except ValueError as e:
1448 raise PatchError(_('could not decode "%s" binary patch: %s')
1454 raise PatchError(_('could not decode "%s" binary patch: %s')
1449 % (self._fname, str(e)))
1455 % (self._fname, str(e)))
1450 line = getline(lr, self.hunk)
1456 line = getline(lr, self.hunk)
1451 text = zlib.decompress(''.join(dec))
1457 text = zlib.decompress(''.join(dec))
1452 if len(text) != size:
1458 if len(text) != size:
1453 raise PatchError(_('"%s" length is %d bytes, should be %d')
1459 raise PatchError(_('"%s" length is %d bytes, should be %d')
1454 % (self._fname, len(text), size))
1460 % (self._fname, len(text), size))
1455 self.text = text
1461 self.text = text
1456
1462
1457 def parsefilename(str):
1463 def parsefilename(str):
1458 # --- filename \t|space stuff
1464 # --- filename \t|space stuff
1459 s = str[4:].rstrip('\r\n')
1465 s = str[4:].rstrip('\r\n')
1460 i = s.find('\t')
1466 i = s.find('\t')
1461 if i < 0:
1467 if i < 0:
1462 i = s.find(' ')
1468 i = s.find(' ')
1463 if i < 0:
1469 if i < 0:
1464 return s
1470 return s
1465 return s[:i]
1471 return s[:i]
1466
1472
1467 def reversehunks(hunks):
1473 def reversehunks(hunks):
1468 '''reverse the signs in the hunks given as argument
1474 '''reverse the signs in the hunks given as argument
1469
1475
1470 This function operates on hunks coming out of patch.filterpatch, that is
1476 This function operates on hunks coming out of patch.filterpatch, that is
1471 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1477 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1472
1478
1473 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1479 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1474 ... --- a/folder1/g
1480 ... --- a/folder1/g
1475 ... +++ b/folder1/g
1481 ... +++ b/folder1/g
1476 ... @@ -1,7 +1,7 @@
1482 ... @@ -1,7 +1,7 @@
1477 ... +firstline
1483 ... +firstline
1478 ... c
1484 ... c
1479 ... 1
1485 ... 1
1480 ... 2
1486 ... 2
1481 ... + 3
1487 ... + 3
1482 ... -4
1488 ... -4
1483 ... 5
1489 ... 5
1484 ... d
1490 ... d
1485 ... +lastline"""
1491 ... +lastline"""
1486 >>> hunks = parsepatch(rawpatch)
1492 >>> hunks = parsepatch(rawpatch)
1487 >>> hunkscomingfromfilterpatch = []
1493 >>> hunkscomingfromfilterpatch = []
1488 >>> for h in hunks:
1494 >>> for h in hunks:
1489 ... hunkscomingfromfilterpatch.append(h)
1495 ... hunkscomingfromfilterpatch.append(h)
1490 ... hunkscomingfromfilterpatch.extend(h.hunks)
1496 ... hunkscomingfromfilterpatch.extend(h.hunks)
1491
1497
1492 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1498 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1493 >>> from . import util
1499 >>> from . import util
1494 >>> fp = util.stringio()
1500 >>> fp = util.stringio()
1495 >>> for c in reversedhunks:
1501 >>> for c in reversedhunks:
1496 ... c.write(fp)
1502 ... c.write(fp)
1497 >>> fp.seek(0)
1503 >>> fp.seek(0)
1498 >>> reversedpatch = fp.read()
1504 >>> reversedpatch = fp.read()
1499 >>> print reversedpatch
1505 >>> print reversedpatch
1500 diff --git a/folder1/g b/folder1/g
1506 diff --git a/folder1/g b/folder1/g
1501 --- a/folder1/g
1507 --- a/folder1/g
1502 +++ b/folder1/g
1508 +++ b/folder1/g
1503 @@ -1,4 +1,3 @@
1509 @@ -1,4 +1,3 @@
1504 -firstline
1510 -firstline
1505 c
1511 c
1506 1
1512 1
1507 2
1513 2
1508 @@ -2,6 +1,6 @@
1514 @@ -2,6 +1,6 @@
1509 c
1515 c
1510 1
1516 1
1511 2
1517 2
1512 - 3
1518 - 3
1513 +4
1519 +4
1514 5
1520 5
1515 d
1521 d
1516 @@ -6,3 +5,2 @@
1522 @@ -6,3 +5,2 @@
1517 5
1523 5
1518 d
1524 d
1519 -lastline
1525 -lastline
1520
1526
1521 '''
1527 '''
1522
1528
1523 newhunks = []
1529 newhunks = []
1524 for c in hunks:
1530 for c in hunks:
1525 if util.safehasattr(c, 'reversehunk'):
1531 if util.safehasattr(c, 'reversehunk'):
1526 c = c.reversehunk()
1532 c = c.reversehunk()
1527 newhunks.append(c)
1533 newhunks.append(c)
1528 return newhunks
1534 return newhunks
1529
1535
1530 def parsepatch(originalchunks):
1536 def parsepatch(originalchunks, maxcontext=None):
1531 """patch -> [] of headers -> [] of hunks """
1537 """patch -> [] of headers -> [] of hunks
1538
1539 If maxcontext is not None, trim context lines if necessary.
1540
1541 >>> rawpatch = '''diff --git a/folder1/g b/folder1/g
1542 ... --- a/folder1/g
1543 ... +++ b/folder1/g
1544 ... @@ -1,8 +1,10 @@
1545 ... 1
1546 ... 2
1547 ... -3
1548 ... 4
1549 ... 5
1550 ... 6
1551 ... +6.1
1552 ... +6.2
1553 ... 7
1554 ... 8
1555 ... +9'''
1556 >>> out = util.stringio()
1557 >>> headers = parsepatch([rawpatch], maxcontext=1)
1558 >>> for header in headers:
1559 ... header.write(out)
1560 ... for hunk in header.hunks:
1561 ... hunk.write(out)
1562 >>> print(out.getvalue())
1563 diff --git a/folder1/g b/folder1/g
1564 --- a/folder1/g
1565 +++ b/folder1/g
1566 @@ -2,3 +2,2 @@
1567 2
1568 -3
1569 4
1570 @@ -6,2 +5,4 @@
1571 6
1572 +6.1
1573 +6.2
1574 7
1575 @@ -8,1 +9,2 @@
1576 8
1577 +9
1578 """
1532 class parser(object):
1579 class parser(object):
1533 """patch parsing state machine"""
1580 """patch parsing state machine"""
1534 def __init__(self):
1581 def __init__(self):
1535 self.fromline = 0
1582 self.fromline = 0
1536 self.toline = 0
1583 self.toline = 0
1537 self.proc = ''
1584 self.proc = ''
1538 self.header = None
1585 self.header = None
1539 self.context = []
1586 self.context = []
1540 self.before = []
1587 self.before = []
1541 self.hunk = []
1588 self.hunk = []
1542 self.headers = []
1589 self.headers = []
1543
1590
1544 def addrange(self, limits):
1591 def addrange(self, limits):
1545 fromstart, fromend, tostart, toend, proc = limits
1592 fromstart, fromend, tostart, toend, proc = limits
1546 self.fromline = int(fromstart)
1593 self.fromline = int(fromstart)
1547 self.toline = int(tostart)
1594 self.toline = int(tostart)
1548 self.proc = proc
1595 self.proc = proc
1549
1596
1550 def addcontext(self, context):
1597 def addcontext(self, context):
1551 if self.hunk:
1598 if self.hunk:
1552 h = recordhunk(self.header, self.fromline, self.toline,
1599 h = recordhunk(self.header, self.fromline, self.toline,
1553 self.proc, self.before, self.hunk, context)
1600 self.proc, self.before, self.hunk, context, maxcontext)
1554 self.header.hunks.append(h)
1601 self.header.hunks.append(h)
1555 self.fromline += len(self.before) + h.removed
1602 self.fromline += len(self.before) + h.removed
1556 self.toline += len(self.before) + h.added
1603 self.toline += len(self.before) + h.added
1557 self.before = []
1604 self.before = []
1558 self.hunk = []
1605 self.hunk = []
1559 self.context = context
1606 self.context = context
1560
1607
1561 def addhunk(self, hunk):
1608 def addhunk(self, hunk):
1562 if self.context:
1609 if self.context:
1563 self.before = self.context
1610 self.before = self.context
1564 self.context = []
1611 self.context = []
1565 self.hunk = hunk
1612 self.hunk = hunk
1566
1613
1567 def newfile(self, hdr):
1614 def newfile(self, hdr):
1568 self.addcontext([])
1615 self.addcontext([])
1569 h = header(hdr)
1616 h = header(hdr)
1570 self.headers.append(h)
1617 self.headers.append(h)
1571 self.header = h
1618 self.header = h
1572
1619
1573 def addother(self, line):
1620 def addother(self, line):
1574 pass # 'other' lines are ignored
1621 pass # 'other' lines are ignored
1575
1622
1576 def finished(self):
1623 def finished(self):
1577 self.addcontext([])
1624 self.addcontext([])
1578 return self.headers
1625 return self.headers
1579
1626
1580 transitions = {
1627 transitions = {
1581 'file': {'context': addcontext,
1628 'file': {'context': addcontext,
1582 'file': newfile,
1629 'file': newfile,
1583 'hunk': addhunk,
1630 'hunk': addhunk,
1584 'range': addrange},
1631 'range': addrange},
1585 'context': {'file': newfile,
1632 'context': {'file': newfile,
1586 'hunk': addhunk,
1633 'hunk': addhunk,
1587 'range': addrange,
1634 'range': addrange,
1588 'other': addother},
1635 'other': addother},
1589 'hunk': {'context': addcontext,
1636 'hunk': {'context': addcontext,
1590 'file': newfile,
1637 'file': newfile,
1591 'range': addrange},
1638 'range': addrange},
1592 'range': {'context': addcontext,
1639 'range': {'context': addcontext,
1593 'hunk': addhunk},
1640 'hunk': addhunk},
1594 'other': {'other': addother},
1641 'other': {'other': addother},
1595 }
1642 }
1596
1643
1597 p = parser()
1644 p = parser()
1598 fp = stringio()
1645 fp = stringio()
1599 fp.write(''.join(originalchunks))
1646 fp.write(''.join(originalchunks))
1600 fp.seek(0)
1647 fp.seek(0)
1601
1648
1602 state = 'context'
1649 state = 'context'
1603 for newstate, data in scanpatch(fp):
1650 for newstate, data in scanpatch(fp):
1604 try:
1651 try:
1605 p.transitions[state][newstate](p, data)
1652 p.transitions[state][newstate](p, data)
1606 except KeyError:
1653 except KeyError:
1607 raise PatchError('unhandled transition: %s -> %s' %
1654 raise PatchError('unhandled transition: %s -> %s' %
1608 (state, newstate))
1655 (state, newstate))
1609 state = newstate
1656 state = newstate
1610 del fp
1657 del fp
1611 return p.finished()
1658 return p.finished()
1612
1659
1613 def pathtransform(path, strip, prefix):
1660 def pathtransform(path, strip, prefix):
1614 '''turn a path from a patch into a path suitable for the repository
1661 '''turn a path from a patch into a path suitable for the repository
1615
1662
1616 prefix, if not empty, is expected to be normalized with a / at the end.
1663 prefix, if not empty, is expected to be normalized with a / at the end.
1617
1664
1618 Returns (stripped components, path in repository).
1665 Returns (stripped components, path in repository).
1619
1666
1620 >>> pathtransform('a/b/c', 0, '')
1667 >>> pathtransform('a/b/c', 0, '')
1621 ('', 'a/b/c')
1668 ('', 'a/b/c')
1622 >>> pathtransform(' a/b/c ', 0, '')
1669 >>> pathtransform(' a/b/c ', 0, '')
1623 ('', ' a/b/c')
1670 ('', ' a/b/c')
1624 >>> pathtransform(' a/b/c ', 2, '')
1671 >>> pathtransform(' a/b/c ', 2, '')
1625 ('a/b/', 'c')
1672 ('a/b/', 'c')
1626 >>> pathtransform('a/b/c', 0, 'd/e/')
1673 >>> pathtransform('a/b/c', 0, 'd/e/')
1627 ('', 'd/e/a/b/c')
1674 ('', 'd/e/a/b/c')
1628 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1675 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1629 ('a//b/', 'd/e/c')
1676 ('a//b/', 'd/e/c')
1630 >>> pathtransform('a/b/c', 3, '')
1677 >>> pathtransform('a/b/c', 3, '')
1631 Traceback (most recent call last):
1678 Traceback (most recent call last):
1632 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1679 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1633 '''
1680 '''
1634 pathlen = len(path)
1681 pathlen = len(path)
1635 i = 0
1682 i = 0
1636 if strip == 0:
1683 if strip == 0:
1637 return '', prefix + path.rstrip()
1684 return '', prefix + path.rstrip()
1638 count = strip
1685 count = strip
1639 while count > 0:
1686 while count > 0:
1640 i = path.find('/', i)
1687 i = path.find('/', i)
1641 if i == -1:
1688 if i == -1:
1642 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1689 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1643 (count, strip, path))
1690 (count, strip, path))
1644 i += 1
1691 i += 1
1645 # consume '//' in the path
1692 # consume '//' in the path
1646 while i < pathlen - 1 and path[i] == '/':
1693 while i < pathlen - 1 and path[i] == '/':
1647 i += 1
1694 i += 1
1648 count -= 1
1695 count -= 1
1649 return path[:i].lstrip(), prefix + path[i:].rstrip()
1696 return path[:i].lstrip(), prefix + path[i:].rstrip()
1650
1697
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Synthesize a patchmeta for a plain (non-git) patch hunk.

    afile_orig/bfile_orig are the raw '---'/'+++' paths from the patch
    header; strip components are removed and prefix prepended via
    pathtransform().  Returns a patchmeta whose op is 'ADD', 'DELETE'
    or the default (change), and whose path names the file that should
    actually be patched.  Raises PatchError when neither side names a
    usable file.
    """
    # '/dev/null' on either side marks a creation or a removal; the
    # zero start/length check distinguishes a real create/remove hunk.
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    # gooda/goodb: the transformed path exists in the backend
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        # avoid a second backend lookup when both sides are the same path
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch header paths even though the target
        # does not exist yet (it may be being created)
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1705
1752
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    rangere = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def consumewhile(first, pred):
        # Gather lines from lr as long as pred holds; the first
        # non-matching line is pushed back for the caller.
        collected = [first]
        for l in iter(lr.readline, ''):
            if not pred(l):
                lr.push(l)
                break
            collected.append(l)
        return collected

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            # file header: everything up to the '---'/'diff' boundary,
            # plus the from/to file lines when present
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            header = consumewhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # unchanged context (and '\ No newline ...' markers)
            yield 'context', consumewhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            # added/removed lines (and '\ No newline ...' markers)
            yield 'hunk', consumewhile(line, lambda l: l[0] in '-+\\')
        else:
            m = rangere.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1798
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    That sequence cannot be applied as-is: the renamed 'a' would be gone
    and 'b' already changed.  So the whole input is scanned up front for
    copy and rename commands, letting the caller perform the copies ahead
    of time.  The underlying stream is rewound before returning.
    """
    mark = 0
    try:
        # seekable input: remember the position and scan in place
        mark = lr.fp.tell()
        scanfp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): slurp it into memory first
        scanfp = stringio(lr.fp.read())
    sublr = linereader(scanfp)
    sublr.push(firstline)
    patches = readgitpatch(sublr)
    scanfp.seek(mark)
    return patches
1777
1824
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # reversed stack of pending git patch metadata, or None until the
    # first 'diff --git' line is seen
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = format not known yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk starts here; pair it with pending git metadata if
            # that metadata is for the current file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunk-less metadata entries (mode changes, renames
            # without content change, ...) that precede the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # drain remaining hunk-less git metadata at end of input
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1873
1920
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the delta stream (two varint size headers followed by
    copy/insert opcodes); data is the source the copy opcodes read from.
    Returns the reconstructed target content.  Raises PatchError on a
    zero opcode, which git reserves as invalid.
    """
    def deltahead(binchunk):
        # A git delta size header is a varint: the high bit of each byte
        # flags a continuation.  Return the number of bytes it occupies.
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # Collect output fragments and join once at the end: repeated
    # string += in the loop is quadratic on large deltas.
    out = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip source-size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip target-size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy opcode: low bits select which offset/size bytes
            # follow, little-endian
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, an all-zero size means 0x10000
                size = 0x10000
            offset_end = offset + size
            out.append(data[offset:offset_end])
        elif cmd != 0:
            # insert opcode: cmd literal bytes follow inline
            offset_end = i + cmd
            out.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(out)
1929
1976
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and try to apply it through backend.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With eolmode 'strict' the patch content and patched files are read
    in binary mode; any other mode ignores line endings while patching
    and then normalizes them according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1942
1989
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Worker for applydiff(): drive iterhunks() events through *patcher*.

    Returns 0 on a clean apply, 1 if any hunk applied with fuzz, and -1
    if any hunk was rejected.
    """
    if prefix:
        # canonicalize the user-supplied prefix against the repo root/cwd
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip - 1 because git metadata paths already lack the a//b/
        # leading component that plain patch paths carry
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            # hunks with no selected file (earlier open failure) are
            # silently skipped; the failure was already counted
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize the metadata from the hunk
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: deletion, copy/rename or mode
                # change without any content hunk
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # count the failure but keep applying the other files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources before anything mutates them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2032
2079
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Parses the external patch program's stdout for 'patching file',
    'with fuzz', 'saving rejects' and 'FAILED' markers; touched files
    are added to *files*.  Raises PatchError if the command exits
    non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # Fix: initialize before the loop.  A 'with fuzz'/'FAILED' line can
    # appear before any 'patching file' line (or the output format can
    # differ from GNU patch), which previously raised UnboundLocalError
    # on pf/printed_file.
    pf = None
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    if pf is not None:
                        ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    if pf is not None:
                        ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2074
2121
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj (a path or a file-like object) through backend.

    Touched paths are accumulated in *files*.  Returns True if the
    patch applied with fuzz; raises PatchError when hunks were
    rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume patchobj is already a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store,
                        strip=strip, prefix=prefix, eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2101
2148
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    backend = workingbackend(ui, repo, similarity)
    fuzz = patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
    return fuzz
2108
2155
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against changectx *ctx*, recording results in
    *store*.  Returns whether the patch applied with fuzz factor."""
    backend = repobackend(ui, repo, ctx, store)
    fuzz = patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
    return fuzz
2113
2160
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an ui.patch setting selects an external patch program
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2135
2182
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repo-relative paths touched by the patch at
    patchpath (rename sources included)."""
    backend = fsbackend(ui, repo.root)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # strip - 1: git metadata paths lack the a//b/ prefix
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath,
                                                   strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
    return changed
2156
2203
class GitDiffRequired(Exception):
    """Raised when an operation requires git-style diffs to proceed."""
2159
2206
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# legacy alias kept for callers that still use the old name
diffopts = diffallopts
2166
2213
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    """Return diffopts with only opted-in features parsed.

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    """
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Command-line values win over config, but only when they look
        # explicitly set. diffopts flags are either None-default (passed
        # through unchanged so unset values stay identifiable) or some
        # other falsey default (eg --unified defaults to an empty
        # string), so "set" means any truthy value, True, or False.
        if opts:
            v = opts.get(key)
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # this lives in the experimental section, so we need to call
        # ui.configbool directly instead of going through get()
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # inspect the raw config value instead of using get() since we
        # want to test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash)
                # or a word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # not an integer: map the recognized keywords
                if hconf in ('short', ''):
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a result, even for unrecognized values
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        if binary is not None:
            buildopts['nobinary'] = not binary
        else:
            buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2246
2293
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    hunkiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                         changes=changes, opts=opts, losedatafn=losedatafn,
                         prefix=prefix, relroot=relroot, copy=copy)
    for header, hunks in hunkiter:
        # flatten every hunk's line list into one text blob
        text = ''.join(line for _hrange, hlines in hunks for line in hlines)
        # emit the header when there is hunk text, or when the header
        # alone carries information (more than the bare diff line)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2280
2327
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default to diffing the working directory against its first parent
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (capacity ~20) over filelogs, keyed by file name, so
        # repeated lookups of the same file reuse the loaded filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        # nothing to diff
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    # null nodes (working directory) are dropped from the rev list
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            # copy information is only needed for git-style diffs (or a
            # potential upgrade to them)
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        # try a plain diff first; fall back to git format if any change
        # cannot be represented without losing data
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2393
2440
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    func is expected to yield chunks of diff text (as diff() does).
    Each line of each chunk is matched against the known diff prefixes
    and paired with the corresponding color label.
    '''
    # prefixes that appear only in the per-file header portion of a diff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # prefixes that appear in hunk bodies
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # head tracks whether we are currently inside a file header, which
    # disambiguates lines starting with '--'/'++' from '-'/'+' changes
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newlines consumed by split()
                yield ('\n', '')
            if head:
                # a hunk start ('@@ ...') ends the header
                if line.startswith('@'):
                    head = False
            else:
                # any line that is not hunk content starts a new header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split on tab runs so tabs get their own label
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: plain, unlabeled output
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2447
2494
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()

    Takes the same arguments as diff(); simply wraps it with difflabel()
    so the output can be colorized.
    '''
    return difflabel(diff, *args, **kw)
2451
2498
2452 def _filepairs(modified, added, removed, copy, opts):
2499 def _filepairs(modified, added, removed, copy, opts):
2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2500 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2454 before and f2 is the the name after. For added files, f1 will be None,
2501 before and f2 is the the name after. For added files, f1 will be None,
2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2502 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2456 or 'rename' (the latter two only if opts.git is set).'''
2503 or 'rename' (the latter two only if opts.git is set).'''
2457 gone = set()
2504 gone = set()
2458
2505
2459 copyto = dict([(v, k) for k, v in copy.items()])
2506 copyto = dict([(v, k) for k, v in copy.items()])
2460
2507
2461 addedset, removedset = set(added), set(removed)
2508 addedset, removedset = set(added), set(removed)
2462
2509
2463 for f in sorted(modified + added + removed):
2510 for f in sorted(modified + added + removed):
2464 copyop = None
2511 copyop = None
2465 f1, f2 = f, f
2512 f1, f2 = f, f
2466 if f in addedset:
2513 if f in addedset:
2467 f1 = None
2514 f1 = None
2468 if f in copy:
2515 if f in copy:
2469 if opts.git:
2516 if opts.git:
2470 f1 = copy[f]
2517 f1 = copy[f]
2471 if f1 in removedset and f1 not in gone:
2518 if f1 in removedset and f1 not in gone:
2472 copyop = 'rename'
2519 copyop = 'rename'
2473 gone.add(f1)
2520 gone.add(f1)
2474 else:
2521 else:
2475 copyop = 'copy'
2522 copyop = 'copy'
2476 elif f in removedset:
2523 elif f in removedset:
2477 f2 = None
2524 f2 = None
2478 if opts.git:
2525 if opts.git:
2479 # have we already reported a copy above?
2526 # have we already reported a copy above?
2480 if (f in copyto and copyto[f] in addedset
2527 if (f in copyto and copyto[f] in addedset
2481 and copy[copyto[f]] == f):
2528 and copy[copyto[f]] == f):
2482 continue
2529 continue
2483 yield f1, f2, copyop
2530 yield f1, f2, copyop
2484
2531
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git blob id: sha1 of "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # non-git header line: "diff -r REV1 [-r REV2] FILE"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: every incoming path must live under relroot
    # NOTE: copy.keys() + copy.values() relies on py2 dicts returning lists
    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        # plain (non-git) diffs cannot express these changes; give the
        # caller a chance to upgrade to git format or accept the loss
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # strip relroot and prepend the display prefix on both paths
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # base85-encoded binary diff
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                # truncated blob-id index line, per extendedheader.index
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2640
2687
def diffstatsum(stats):
    """Aggregate per-file diffstat data.

    stats is an iterable of (filename, adds, removes, isbinary) tuples as
    produced by diffstatdata(). Returns a tuple (maxfile, maxtotal,
    addtotal, removetotal, binary): the widest filename (in display
    columns), the largest per-file change count, the grand totals of
    added and removed lines, and whether any file was binary.
    """
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        width = encoding.colwidth(filename)
        if width > maxfile:
            maxfile = width
        total = adds + removes
        if total > maxtotal:
            maxtotal = total
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2651
2698
def diffstatdata(lines):
    '''Parse diff text into per-file statistics.

    ``lines`` is an iterable of diff output lines. Returns a list of
    ``(filename, adds, removes, isbinary)`` tuples, one per file diff
    encountered.
    '''
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False
    # inheader tracks whether we are still inside the header portion of a
    # file diff, so that '---'/'+++' lines are not counted as changes
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            # a new file diff begins: flush the previous file's counters
            if filename:
                results.append((filename, adds, removes, isbinary))
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker ends the header
            inheader = False
        elif not inheader and line.startswith('+'):
            adds += 1
        elif not inheader and line.startswith('-'):
            removes += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            isbinary = True
    # flush the final file, if any
    if filename:
        results.append((filename, adds, removes, isbinary))
    return results
2690
2737
def diffstat(lines, width=80):
    '''Render a diffstat summary of ``lines`` as a single string.

    One row per file (" name | count ++--" style) followed by a totals
    line; histogram bars are scaled so the output fits within ``width``
    columns. Returns the empty string when the diff touches no files.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the change-count column; 'Bin' needs at least 3 columns
    countwidth = len(str(maxtotal))
    if hasbinary:
        countwidth = max(countwidth, 3)
    # room left for the +/- histogram, never squeezed below 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(n):
        if maxtotal <= graphwidth:
            return n
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(n * graphwidth // maxtotal, int(bool(n)))

    rows = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        # pad by display width so multi-column names stay aligned
        padding = ' ' * (maxname - encoding.colwidth(filename))
        rows.append(' %s%s | %*s %s%s\n' %
                    (filename, padding, countwidth, count,
                     '+' * scale(adds), '-' * scale(removes)))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
2728
2775
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: emit the name/count part unlabelled, then
            # each run of the graph with its colour label
            prefix, graph = row.rsplit(' ', 1)
            yield (prefix + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                match = re.search(pattern, graph)
                if match:
                    yield (match.group(0), label)
        else:
            # totals line or a row with no graph: pass through unlabelled
            yield (row, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now