##// END OF EJS Templates
py3: replace bytes[n] with bytes[n:n + 1] in patch.py where needed
Yuya Nishihara -
r34069:8b8b70cb default
parent child Browse files
Show More
@@ -1,2797 +1,2797 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
# hunk-application helpers: C implementation when available, pure-Python
# fallback otherwise (selected by the policy module)
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# matches a git extended diff header, capturing the a/ and b/ paths
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and runs of non-tab characters
tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
51
51
52
52
53 # public functions
53 # public functions
54
54
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic: does this line look like an RFC 2822 header line?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines in a seekable file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split at '# HG changeset patch' markers; such a marker inside a
        # header section is treated as part of the current patch
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox at 'From ' separator lines, recursing into each
        # message body (the separator line itself is dropped via cur[1:])
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            # flatten a message object back into a seekable file
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        # consume the remainder of the stream, then let the email parser
        # pull the patch-bearing parts out of the MIME structure
        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split whenever a new header section starts
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # everything read so far is one plain patch
        yield chunk(cur)

    class fiter(object):
        # minimal iterator adapter over an object that only has readline()
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the beginning of the stream to pick a splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
181
181
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# '# <header> <value>' lines are mapped to data[<key>] by extract() below
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
188
188
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the concatenated diff content is spooled to a temp file whose name
    # is returned in data['filename']; caller unlinks it when done
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]'-style tag
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is commit message,
                # possibly interleaved with 'hg export' header lines
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible '# Header value' lines
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # '---' separates the message from patch statistics
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file before propagating any error
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing useful in the temp file
        os.unlink(tmpname)
    return data
309
309
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a git octal file mode into our (islink, isexec) pair
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # shallow duplicate carrying over every metadata field
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # the a/ side names the source: old path for renames/copies,
        # /dev/null for additions
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # the b/ side names the destination: /dev/null for deletions
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True if this metadata describes the afile -> bfile patch."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
355
355
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    lr is an iterable of patch lines; returns a list of patchmeta
    objects, one per 'diff --git' section encountered.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                # default to the b/ path; rename/copy lines override below
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunks begin: metadata for the current file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last six characters are the octal mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
399
399
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # queue a line; it will be served before reading from fp again
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # pushed-back lines take priority over the underlying stream
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until the underlying stream reports EOF ('')
        return iter(self.readline, '')
419
419
class abstractbackend(object):
    """Interface for the targets patches are applied to (filesystem,
    working directory, in-memory store...)."""
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        # optional: backends with no notion of reject files keep this no-op
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
454
454
class fsbackend(abstractbackend):
    """Backend applying patches to files under basedir via a vfs."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink: data is the link target, flags mark it as a link
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file simply isn't executable
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing/deleted file is signalled as (None, None)
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects are saved next to the target as '<fname>.rej'
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        # lexists: a dangling symlink still counts as existing
        return self.opener.lexists(fname)
503
503
class workingbackend(fsbackend):
    """Backend patching the working directory of repo, recording
    adds/removes/copies in the dirstate on close()."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to clobber files present on disk but unknown to dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # record all accumulated changes in the dirstate and return the
        # sorted list of touched files
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
547
547
class filestore(object):
    """Store file content keyed by name, keeping small totals in memory
    and spilling to a temporary directory past maxsize bytes."""
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        # default in-memory budget: 4 MiB; negative means never spill
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # still within budget: keep in memory
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        # in-memory entries take priority, then spilled files
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # discard the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
584
584
class repobackend(abstractbackend):
    """Backend reading original content from ctx and writing patched
    results into store, tracking changed/removed files."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        # maps destination name -> copy source
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # missing file is signalled as (None, None)
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # only recorded here; actual removal happens when the store is
        # turned into a new revision by the caller
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # report every file this backend touched
        return self.changed | self.removed
626
626
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns contain \d, \+ and \* which are invalid
# escape sequences in plain string literals (DeprecationWarning on
# Python 3.6+, an error in later versions). The compiled regexes are
# byte-for-byte identical to the previous non-raw forms.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# End-of-line handling modes accepted by patchfile (see eolmode below).
eolmodes = ['strict', 'crlf', 'lf', 'auto']
631
631
class patchfile(object):
    """State and logic for applying the hunks of one patch to one file.

    The constructor reads the target content (or the copy source from
    the store), apply() applies one hunk at a time — first exactly, then
    with offset search and fuzz — and close() writes the patched result
    and any reject material back through the backend.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        # EOL style detected from the first line of the existing data
        # ('\r\n', '\n', or None when unknown/empty).
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        # Assume the target is missing until its data is actually found.
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # Copies/renames read their starting content from the store.
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # Convert CRLF to LF so hunk matching is independent
                    # of stored line endings; writelines() restores them.
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        # content -> [line numbers], built lazily in apply() when the
        # fast (exact-position) match fails.
        self.hash = {}
        self.dirty = 0
        # Cumulative line-count delta introduced by hunks applied so far.
        self.offset = 0
        # Displacement of the last applied hunk from its expected
        # position; seeds the search window for subsequent hunks.
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, restoring the line endings
        dictated by eolmode (and, for 'auto', by the detected self.eol)."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Re-apply the target EOL that was normalized away on read.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit 'patching file X' at most once — as a warning when warn
        is set, otherwise as a note (verbose output only)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h to self.lines.

        Returns 0 on a clean application, the fuzz amount (>= 0) when
        fuzzing/offsetting was needed, or -1 when the hunk was rejected
        (in which case it is appended to self.rej).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # Binary hunks replace the whole content; no fuzzing applies.
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz; at each level, first dropping context only
        # from the top of the hunk, then from both ends.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        # Reject the original (un-normalized) hunk so the .rej file
        # reproduces the input patch.
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
848
848
class header(object):
    """patch header

    Wraps the list of raw header lines of one file's diff and answers
    questions about them (binary? new file? which files?).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        # git-style headers name both sides; -r style names only one.
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join([repr(f) for f in self.files()]))

    def isnewfile(self):
        for line in self.header:
            if self.newfile_re.match(line):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
920
920
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Limit context to maxcontext lines, dropping from the start
            # (reverse=True) or the end; report how many were dropped.
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        trimmedbefore, self.before = trimcontext(before, True)
        # Shift both line numbers by the context dropped at the top.
        self.fromline = fromline + trimmedbefore
        self.toline = toline + trimmedbefore
        _trimmedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (self.hunk == v.hunk
                and self.proc == v.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk), tuple(self.header.files()),
                     self.fromline, self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line[0] == '+')
        rem = sum(1 for line in hunk if line[0] == '-')
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        hunk = [flip[line[0]] + line[1:] for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
999
999
# Prompt text for interactive hunk selection, keyed first by situation
# ('multiple' = change i of n in a file, 'single' = only change in the
# file, 'help' = the choice menu) and then by operation name. The 'help'
# entries use ui.promptchoice()'s syntax: '$$' separates choices and '&'
# marks each choice's response character.
messages = {
    'multiple': {
        'discard': _("discard change %d/%d to '%s'?"),
        'record': _("record change %d/%d to '%s'?"),
        'revert': _("revert change %d/%d to '%s'?"),
    },
    'single': {
        'discard': _("discard this change to '%s'?"),
        'record': _("record this change to '%s'?"),
        'revert': _("revert this change to '%s'?"),
    },
    'help': {
        'discard': _('[Ynesfdaq?]'
                     '$$ &Yes, discard this change'
                     '$$ &No, skip this change'
                     '$$ &Edit this change manually'
                     '$$ &Skip remaining changes to this file'
                     '$$ Discard remaining changes to this &file'
                     '$$ &Done, skip remaining changes and files'
                     '$$ Discard &all changes to all remaining files'
                     '$$ &Quit, discarding no changes'
                     '$$ &? (display help)'),
        'record': _('[Ynesfdaq?]'
                    '$$ &Yes, record this change'
                    '$$ &No, skip this change'
                    '$$ &Edit this change manually'
                    '$$ &Skip remaining changes to this file'
                    '$$ Record remaining changes to this &file'
                    '$$ &Done, skip remaining changes and files'
                    '$$ Record &all changes to all remaining files'
                    '$$ &Quit, recording no changes'
                    '$$ &? (display help)'),
        'revert': _('[Ynesfdaq?]'
                    '$$ &Yes, revert this change'
                    '$$ &No, skip this change'
                    '$$ &Edit this change manually'
                    '$$ &Skip remaining changes to this file'
                    '$$ Revert remaining changes to this &file'
                    '$$ &Done, skip remaining changes and files'
                    '$$ Revert &all changes to all remaining files'
                    '$$ &Quit, reverting no changes'
                    '$$ &? (display help)')
    }
}
1044
1044
1045 def filterpatch(ui, headers, operation=None):
1045 def filterpatch(ui, headers, operation=None):
1046 """Interactively filter patch chunks into applied-only chunks"""
1046 """Interactively filter patch chunks into applied-only chunks"""
1047 if operation is None:
1047 if operation is None:
1048 operation = 'record'
1048 operation = 'record'
1049
1049
1050 def prompt(skipfile, skipall, query, chunk):
1050 def prompt(skipfile, skipall, query, chunk):
1051 """prompt query, and process base inputs
1051 """prompt query, and process base inputs
1052
1052
1053 - y/n for the rest of file
1053 - y/n for the rest of file
1054 - y/n for the rest
1054 - y/n for the rest
1055 - ? (help)
1055 - ? (help)
1056 - q (quit)
1056 - q (quit)
1057
1057
1058 Return True/False and possibly updated skipfile and skipall.
1058 Return True/False and possibly updated skipfile and skipall.
1059 """
1059 """
1060 newpatches = None
1060 newpatches = None
1061 if skipall is not None:
1061 if skipall is not None:
1062 return skipall, skipfile, skipall, newpatches
1062 return skipall, skipfile, skipall, newpatches
1063 if skipfile is not None:
1063 if skipfile is not None:
1064 return skipfile, skipfile, skipall, newpatches
1064 return skipfile, skipfile, skipall, newpatches
1065 while True:
1065 while True:
1066 resps = messages['help'][operation]
1066 resps = messages['help'][operation]
1067 r = ui.promptchoice("%s %s" % (query, resps))
1067 r = ui.promptchoice("%s %s" % (query, resps))
1068 ui.write("\n")
1068 ui.write("\n")
1069 if r == 8: # ?
1069 if r == 8: # ?
1070 for c, t in ui.extractchoices(resps)[1]:
1070 for c, t in ui.extractchoices(resps)[1]:
1071 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1071 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1072 continue
1072 continue
1073 elif r == 0: # yes
1073 elif r == 0: # yes
1074 ret = True
1074 ret = True
1075 elif r == 1: # no
1075 elif r == 1: # no
1076 ret = False
1076 ret = False
1077 elif r == 2: # Edit patch
1077 elif r == 2: # Edit patch
1078 if chunk is None:
1078 if chunk is None:
1079 ui.write(_('cannot edit patch for whole file'))
1079 ui.write(_('cannot edit patch for whole file'))
1080 ui.write("\n")
1080 ui.write("\n")
1081 continue
1081 continue
1082 if chunk.header.binary():
1082 if chunk.header.binary():
1083 ui.write(_('cannot edit patch for binary file'))
1083 ui.write(_('cannot edit patch for binary file'))
1084 ui.write("\n")
1084 ui.write("\n")
1085 continue
1085 continue
1086 # Patch comment based on the Git one (based on comment at end of
1086 # Patch comment based on the Git one (based on comment at end of
1087 # https://mercurial-scm.org/wiki/RecordExtension)
1087 # https://mercurial-scm.org/wiki/RecordExtension)
1088 phelp = '---' + _("""
1088 phelp = '---' + _("""
1089 To remove '-' lines, make them ' ' lines (context).
1089 To remove '-' lines, make them ' ' lines (context).
1090 To remove '+' lines, delete them.
1090 To remove '+' lines, delete them.
1091 Lines starting with # will be removed from the patch.
1091 Lines starting with # will be removed from the patch.
1092
1092
1093 If the patch applies cleanly, the edited hunk will immediately be
1093 If the patch applies cleanly, the edited hunk will immediately be
1094 added to the record list. If it does not apply cleanly, a rejects
1094 added to the record list. If it does not apply cleanly, a rejects
1095 file will be generated: you can use that when you try again. If
1095 file will be generated: you can use that when you try again. If
1096 all lines of the hunk are removed, then the edit is aborted and
1096 all lines of the hunk are removed, then the edit is aborted and
1097 the hunk is left unchanged.
1097 the hunk is left unchanged.
1098 """)
1098 """)
1099 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1099 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1100 suffix=".diff", text=True)
1100 suffix=".diff", text=True)
1101 ncpatchfp = None
1101 ncpatchfp = None
1102 try:
1102 try:
1103 # Write the initial patch
1103 # Write the initial patch
1104 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1104 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1105 chunk.header.write(f)
1105 chunk.header.write(f)
1106 chunk.write(f)
1106 chunk.write(f)
1107 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1107 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1108 f.close()
1108 f.close()
1109 # Start the editor and wait for it to complete
1109 # Start the editor and wait for it to complete
1110 editor = ui.geteditor()
1110 editor = ui.geteditor()
1111 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1111 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1112 environ={'HGUSER': ui.username()},
1112 environ={'HGUSER': ui.username()},
1113 blockedtag='filterpatch')
1113 blockedtag='filterpatch')
1114 if ret != 0:
1114 if ret != 0:
1115 ui.warn(_("editor exited with exit code %d\n") % ret)
1115 ui.warn(_("editor exited with exit code %d\n") % ret)
1116 continue
1116 continue
1117 # Remove comment lines
1117 # Remove comment lines
1118 patchfp = open(patchfn)
1118 patchfp = open(patchfn)
1119 ncpatchfp = stringio()
1119 ncpatchfp = stringio()
1120 for line in util.iterfile(patchfp):
1120 for line in util.iterfile(patchfp):
1121 if not line.startswith('#'):
1121 if not line.startswith('#'):
1122 ncpatchfp.write(line)
1122 ncpatchfp.write(line)
1123 patchfp.close()
1123 patchfp.close()
1124 ncpatchfp.seek(0)
1124 ncpatchfp.seek(0)
1125 newpatches = parsepatch(ncpatchfp)
1125 newpatches = parsepatch(ncpatchfp)
1126 finally:
1126 finally:
1127 os.unlink(patchfn)
1127 os.unlink(patchfn)
1128 del ncpatchfp
1128 del ncpatchfp
1129 # Signal that the chunk shouldn't be applied as-is, but
1129 # Signal that the chunk shouldn't be applied as-is, but
1130 # provide the new patch to be used instead.
1130 # provide the new patch to be used instead.
1131 ret = False
1131 ret = False
1132 elif r == 3: # Skip
1132 elif r == 3: # Skip
1133 ret = skipfile = False
1133 ret = skipfile = False
1134 elif r == 4: # file (Record remaining)
1134 elif r == 4: # file (Record remaining)
1135 ret = skipfile = True
1135 ret = skipfile = True
1136 elif r == 5: # done, skip remaining
1136 elif r == 5: # done, skip remaining
1137 ret = skipall = False
1137 ret = skipall = False
1138 elif r == 6: # all
1138 elif r == 6: # all
1139 ret = skipall = True
1139 ret = skipall = True
1140 elif r == 7: # quit
1140 elif r == 7: # quit
1141 raise error.Abort(_('user quit'))
1141 raise error.Abort(_('user quit'))
1142 return ret, skipfile, skipall, newpatches
1142 return ret, skipfile, skipall, newpatches
1143
1143
1144 seen = set()
1144 seen = set()
1145 applied = {} # 'filename' -> [] of chunks
1145 applied = {} # 'filename' -> [] of chunks
1146 skipfile, skipall = None, None
1146 skipfile, skipall = None, None
1147 pos, total = 1, sum(len(h.hunks) for h in headers)
1147 pos, total = 1, sum(len(h.hunks) for h in headers)
1148 for h in headers:
1148 for h in headers:
1149 pos += len(h.hunks)
1149 pos += len(h.hunks)
1150 skipfile = None
1150 skipfile = None
1151 fixoffset = 0
1151 fixoffset = 0
1152 hdr = ''.join(h.header)
1152 hdr = ''.join(h.header)
1153 if hdr in seen:
1153 if hdr in seen:
1154 continue
1154 continue
1155 seen.add(hdr)
1155 seen.add(hdr)
1156 if skipall is None:
1156 if skipall is None:
1157 h.pretty(ui)
1157 h.pretty(ui)
1158 msg = (_('examine changes to %s?') %
1158 msg = (_('examine changes to %s?') %
1159 _(' and ').join("'%s'" % f for f in h.files()))
1159 _(' and ').join("'%s'" % f for f in h.files()))
1160 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1160 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1161 if not r:
1161 if not r:
1162 continue
1162 continue
1163 applied[h.filename()] = [h]
1163 applied[h.filename()] = [h]
1164 if h.allhunks():
1164 if h.allhunks():
1165 applied[h.filename()] += h.hunks
1165 applied[h.filename()] += h.hunks
1166 continue
1166 continue
1167 for i, chunk in enumerate(h.hunks):
1167 for i, chunk in enumerate(h.hunks):
1168 if skipfile is None and skipall is None:
1168 if skipfile is None and skipall is None:
1169 chunk.pretty(ui)
1169 chunk.pretty(ui)
1170 if total == 1:
1170 if total == 1:
1171 msg = messages['single'][operation] % chunk.filename()
1171 msg = messages['single'][operation] % chunk.filename()
1172 else:
1172 else:
1173 idx = pos - len(h.hunks) + i
1173 idx = pos - len(h.hunks) + i
1174 msg = messages['multiple'][operation] % (idx, total,
1174 msg = messages['multiple'][operation] % (idx, total,
1175 chunk.filename())
1175 chunk.filename())
1176 r, skipfile, skipall, newpatches = prompt(skipfile,
1176 r, skipfile, skipall, newpatches = prompt(skipfile,
1177 skipall, msg, chunk)
1177 skipall, msg, chunk)
1178 if r:
1178 if r:
1179 if fixoffset:
1179 if fixoffset:
1180 chunk = copy.copy(chunk)
1180 chunk = copy.copy(chunk)
1181 chunk.toline += fixoffset
1181 chunk.toline += fixoffset
1182 applied[chunk.filename()].append(chunk)
1182 applied[chunk.filename()].append(chunk)
1183 elif newpatches is not None:
1183 elif newpatches is not None:
1184 for newpatch in newpatches:
1184 for newpatch in newpatches:
1185 for newhunk in newpatch.hunks:
1185 for newhunk in newpatch.hunks:
1186 if fixoffset:
1186 if fixoffset:
1187 newhunk.toline += fixoffset
1187 newhunk.toline += fixoffset
1188 applied[newhunk.filename()].append(newhunk)
1188 applied[newhunk.filename()].append(newhunk)
1189 else:
1189 else:
1190 fixoffset += chunk.removed - chunk.added
1190 fixoffset += chunk.removed - chunk.added
1191 return (sum([h for h in applied.itervalues()
1191 return (sum([h for h in applied.itervalues()
1192 if h[0].special() or len(h) > 1], []), {})
1192 if h[0].special() or len(h) > 1], []), {})
1193 class hunk(object):
1193 class hunk(object):
1194 def __init__(self, desc, num, lr, context):
1194 def __init__(self, desc, num, lr, context):
1195 self.number = num
1195 self.number = num
1196 self.desc = desc
1196 self.desc = desc
1197 self.hunk = [desc]
1197 self.hunk = [desc]
1198 self.a = []
1198 self.a = []
1199 self.b = []
1199 self.b = []
1200 self.starta = self.lena = None
1200 self.starta = self.lena = None
1201 self.startb = self.lenb = None
1201 self.startb = self.lenb = None
1202 if lr is not None:
1202 if lr is not None:
1203 if context:
1203 if context:
1204 self.read_context_hunk(lr)
1204 self.read_context_hunk(lr)
1205 else:
1205 else:
1206 self.read_unified_hunk(lr)
1206 self.read_unified_hunk(lr)
1207
1207
1208 def getnormalized(self):
1208 def getnormalized(self):
1209 """Return a copy with line endings normalized to LF."""
1209 """Return a copy with line endings normalized to LF."""
1210
1210
1211 def normalize(lines):
1211 def normalize(lines):
1212 nlines = []
1212 nlines = []
1213 for line in lines:
1213 for line in lines:
1214 if line.endswith('\r\n'):
1214 if line.endswith('\r\n'):
1215 line = line[:-2] + '\n'
1215 line = line[:-2] + '\n'
1216 nlines.append(line)
1216 nlines.append(line)
1217 return nlines
1217 return nlines
1218
1218
1219 # Dummy object, it is rebuilt manually
1219 # Dummy object, it is rebuilt manually
1220 nh = hunk(self.desc, self.number, None, None)
1220 nh = hunk(self.desc, self.number, None, None)
1221 nh.number = self.number
1221 nh.number = self.number
1222 nh.desc = self.desc
1222 nh.desc = self.desc
1223 nh.hunk = self.hunk
1223 nh.hunk = self.hunk
1224 nh.a = normalize(self.a)
1224 nh.a = normalize(self.a)
1225 nh.b = normalize(self.b)
1225 nh.b = normalize(self.b)
1226 nh.starta = self.starta
1226 nh.starta = self.starta
1227 nh.startb = self.startb
1227 nh.startb = self.startb
1228 nh.lena = self.lena
1228 nh.lena = self.lena
1229 nh.lenb = self.lenb
1229 nh.lenb = self.lenb
1230 return nh
1230 return nh
1231
1231
1232 def read_unified_hunk(self, lr):
1232 def read_unified_hunk(self, lr):
1233 m = unidesc.match(self.desc)
1233 m = unidesc.match(self.desc)
1234 if not m:
1234 if not m:
1235 raise PatchError(_("bad hunk #%d") % self.number)
1235 raise PatchError(_("bad hunk #%d") % self.number)
1236 self.starta, self.lena, self.startb, self.lenb = m.groups()
1236 self.starta, self.lena, self.startb, self.lenb = m.groups()
1237 if self.lena is None:
1237 if self.lena is None:
1238 self.lena = 1
1238 self.lena = 1
1239 else:
1239 else:
1240 self.lena = int(self.lena)
1240 self.lena = int(self.lena)
1241 if self.lenb is None:
1241 if self.lenb is None:
1242 self.lenb = 1
1242 self.lenb = 1
1243 else:
1243 else:
1244 self.lenb = int(self.lenb)
1244 self.lenb = int(self.lenb)
1245 self.starta = int(self.starta)
1245 self.starta = int(self.starta)
1246 self.startb = int(self.startb)
1246 self.startb = int(self.startb)
1247 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1247 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1248 self.b)
1248 self.b)
1249 # if we hit eof before finishing out the hunk, the last line will
1249 # if we hit eof before finishing out the hunk, the last line will
1250 # be zero length. Lets try to fix it up.
1250 # be zero length. Lets try to fix it up.
1251 while len(self.hunk[-1]) == 0:
1251 while len(self.hunk[-1]) == 0:
1252 del self.hunk[-1]
1252 del self.hunk[-1]
1253 del self.a[-1]
1253 del self.a[-1]
1254 del self.b[-1]
1254 del self.b[-1]
1255 self.lena -= 1
1255 self.lena -= 1
1256 self.lenb -= 1
1256 self.lenb -= 1
1257 self._fixnewline(lr)
1257 self._fixnewline(lr)
1258
1258
1259 def read_context_hunk(self, lr):
1259 def read_context_hunk(self, lr):
1260 self.desc = lr.readline()
1260 self.desc = lr.readline()
1261 m = contextdesc.match(self.desc)
1261 m = contextdesc.match(self.desc)
1262 if not m:
1262 if not m:
1263 raise PatchError(_("bad hunk #%d") % self.number)
1263 raise PatchError(_("bad hunk #%d") % self.number)
1264 self.starta, aend = m.groups()
1264 self.starta, aend = m.groups()
1265 self.starta = int(self.starta)
1265 self.starta = int(self.starta)
1266 if aend is None:
1266 if aend is None:
1267 aend = self.starta
1267 aend = self.starta
1268 self.lena = int(aend) - self.starta
1268 self.lena = int(aend) - self.starta
1269 if self.starta:
1269 if self.starta:
1270 self.lena += 1
1270 self.lena += 1
1271 for x in xrange(self.lena):
1271 for x in xrange(self.lena):
1272 l = lr.readline()
1272 l = lr.readline()
1273 if l.startswith('---'):
1273 if l.startswith('---'):
1274 # lines addition, old block is empty
1274 # lines addition, old block is empty
1275 lr.push(l)
1275 lr.push(l)
1276 break
1276 break
1277 s = l[2:]
1277 s = l[2:]
1278 if l.startswith('- ') or l.startswith('! '):
1278 if l.startswith('- ') or l.startswith('! '):
1279 u = '-' + s
1279 u = '-' + s
1280 elif l.startswith(' '):
1280 elif l.startswith(' '):
1281 u = ' ' + s
1281 u = ' ' + s
1282 else:
1282 else:
1283 raise PatchError(_("bad hunk #%d old text line %d") %
1283 raise PatchError(_("bad hunk #%d old text line %d") %
1284 (self.number, x))
1284 (self.number, x))
1285 self.a.append(u)
1285 self.a.append(u)
1286 self.hunk.append(u)
1286 self.hunk.append(u)
1287
1287
1288 l = lr.readline()
1288 l = lr.readline()
1289 if l.startswith('\ '):
1289 if l.startswith('\ '):
1290 s = self.a[-1][:-1]
1290 s = self.a[-1][:-1]
1291 self.a[-1] = s
1291 self.a[-1] = s
1292 self.hunk[-1] = s
1292 self.hunk[-1] = s
1293 l = lr.readline()
1293 l = lr.readline()
1294 m = contextdesc.match(l)
1294 m = contextdesc.match(l)
1295 if not m:
1295 if not m:
1296 raise PatchError(_("bad hunk #%d") % self.number)
1296 raise PatchError(_("bad hunk #%d") % self.number)
1297 self.startb, bend = m.groups()
1297 self.startb, bend = m.groups()
1298 self.startb = int(self.startb)
1298 self.startb = int(self.startb)
1299 if bend is None:
1299 if bend is None:
1300 bend = self.startb
1300 bend = self.startb
1301 self.lenb = int(bend) - self.startb
1301 self.lenb = int(bend) - self.startb
1302 if self.startb:
1302 if self.startb:
1303 self.lenb += 1
1303 self.lenb += 1
1304 hunki = 1
1304 hunki = 1
1305 for x in xrange(self.lenb):
1305 for x in xrange(self.lenb):
1306 l = lr.readline()
1306 l = lr.readline()
1307 if l.startswith('\ '):
1307 if l.startswith('\ '):
1308 # XXX: the only way to hit this is with an invalid line range.
1308 # XXX: the only way to hit this is with an invalid line range.
1309 # The no-eol marker is not counted in the line range, but I
1309 # The no-eol marker is not counted in the line range, but I
1310 # guess there are diff(1) out there which behave differently.
1310 # guess there are diff(1) out there which behave differently.
1311 s = self.b[-1][:-1]
1311 s = self.b[-1][:-1]
1312 self.b[-1] = s
1312 self.b[-1] = s
1313 self.hunk[hunki - 1] = s
1313 self.hunk[hunki - 1] = s
1314 continue
1314 continue
1315 if not l:
1315 if not l:
1316 # line deletions, new block is empty and we hit EOF
1316 # line deletions, new block is empty and we hit EOF
1317 lr.push(l)
1317 lr.push(l)
1318 break
1318 break
1319 s = l[2:]
1319 s = l[2:]
1320 if l.startswith('+ ') or l.startswith('! '):
1320 if l.startswith('+ ') or l.startswith('! '):
1321 u = '+' + s
1321 u = '+' + s
1322 elif l.startswith(' '):
1322 elif l.startswith(' '):
1323 u = ' ' + s
1323 u = ' ' + s
1324 elif len(self.b) == 0:
1324 elif len(self.b) == 0:
1325 # line deletions, new block is empty
1325 # line deletions, new block is empty
1326 lr.push(l)
1326 lr.push(l)
1327 break
1327 break
1328 else:
1328 else:
1329 raise PatchError(_("bad hunk #%d old text line %d") %
1329 raise PatchError(_("bad hunk #%d old text line %d") %
1330 (self.number, x))
1330 (self.number, x))
1331 self.b.append(s)
1331 self.b.append(s)
1332 while True:
1332 while True:
1333 if hunki >= len(self.hunk):
1333 if hunki >= len(self.hunk):
1334 h = ""
1334 h = ""
1335 else:
1335 else:
1336 h = self.hunk[hunki]
1336 h = self.hunk[hunki]
1337 hunki += 1
1337 hunki += 1
1338 if h == u:
1338 if h == u:
1339 break
1339 break
1340 elif h.startswith('-'):
1340 elif h.startswith('-'):
1341 continue
1341 continue
1342 else:
1342 else:
1343 self.hunk.insert(hunki - 1, u)
1343 self.hunk.insert(hunki - 1, u)
1344 break
1344 break
1345
1345
1346 if not self.a:
1346 if not self.a:
1347 # this happens when lines were only added to the hunk
1347 # this happens when lines were only added to the hunk
1348 for x in self.hunk:
1348 for x in self.hunk:
1349 if x.startswith('-') or x.startswith(' '):
1349 if x.startswith('-') or x.startswith(' '):
1350 self.a.append(x)
1350 self.a.append(x)
1351 if not self.b:
1351 if not self.b:
1352 # this happens when lines were only deleted from the hunk
1352 # this happens when lines were only deleted from the hunk
1353 for x in self.hunk:
1353 for x in self.hunk:
1354 if x.startswith('+') or x.startswith(' '):
1354 if x.startswith('+') or x.startswith(' '):
1355 self.b.append(x[1:])
1355 self.b.append(x[1:])
1356 # @@ -start,len +start,len @@
1356 # @@ -start,len +start,len @@
1357 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1357 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1358 self.startb, self.lenb)
1358 self.startb, self.lenb)
1359 self.hunk[0] = self.desc
1359 self.hunk[0] = self.desc
1360 self._fixnewline(lr)
1360 self._fixnewline(lr)
1361
1361
1362 def _fixnewline(self, lr):
1362 def _fixnewline(self, lr):
1363 l = lr.readline()
1363 l = lr.readline()
1364 if l.startswith('\ '):
1364 if l.startswith('\ '):
1365 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1365 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1366 else:
1366 else:
1367 lr.push(l)
1367 lr.push(l)
1368
1368
1369 def complete(self):
1369 def complete(self):
1370 return len(self.a) == self.lena and len(self.b) == self.lenb
1370 return len(self.a) == self.lena and len(self.b) == self.lenb
1371
1371
1372 def _fuzzit(self, old, new, fuzz, toponly):
1372 def _fuzzit(self, old, new, fuzz, toponly):
1373 # this removes context lines from the top and bottom of list 'l'. It
1373 # this removes context lines from the top and bottom of list 'l'. It
1374 # checks the hunk to make sure only context lines are removed, and then
1374 # checks the hunk to make sure only context lines are removed, and then
1375 # returns a new shortened list of lines.
1375 # returns a new shortened list of lines.
1376 fuzz = min(fuzz, len(old))
1376 fuzz = min(fuzz, len(old))
1377 if fuzz:
1377 if fuzz:
1378 top = 0
1378 top = 0
1379 bot = 0
1379 bot = 0
1380 hlen = len(self.hunk)
1380 hlen = len(self.hunk)
1381 for x in xrange(hlen - 1):
1381 for x in xrange(hlen - 1):
1382 # the hunk starts with the @@ line, so use x+1
1382 # the hunk starts with the @@ line, so use x+1
1383 if self.hunk[x + 1][0] == ' ':
1383 if self.hunk[x + 1][0] == ' ':
1384 top += 1
1384 top += 1
1385 else:
1385 else:
1386 break
1386 break
1387 if not toponly:
1387 if not toponly:
1388 for x in xrange(hlen - 1):
1388 for x in xrange(hlen - 1):
1389 if self.hunk[hlen - bot - 1][0] == ' ':
1389 if self.hunk[hlen - bot - 1][0] == ' ':
1390 bot += 1
1390 bot += 1
1391 else:
1391 else:
1392 break
1392 break
1393
1393
1394 bot = min(fuzz, bot)
1394 bot = min(fuzz, bot)
1395 top = min(fuzz, top)
1395 top = min(fuzz, top)
1396 return old[top:len(old) - bot], new[top:len(new) - bot], top
1396 return old[top:len(old) - bot], new[top:len(new) - bot], top
1397 return old, new, 0
1397 return old, new, 0
1398
1398
1399 def fuzzit(self, fuzz, toponly):
1399 def fuzzit(self, fuzz, toponly):
1400 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1400 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1401 oldstart = self.starta + top
1401 oldstart = self.starta + top
1402 newstart = self.startb + top
1402 newstart = self.startb + top
1403 # zero length hunk ranges already have their start decremented
1403 # zero length hunk ranges already have their start decremented
1404 if self.lena and oldstart > 0:
1404 if self.lena and oldstart > 0:
1405 oldstart -= 1
1405 oldstart -= 1
1406 if self.lenb and newstart > 0:
1406 if self.lenb and newstart > 0:
1407 newstart -= 1
1407 newstart -= 1
1408 return old, oldstart, new, newstart
1408 return old, oldstart, new, newstart
1409
1409
1410 class binhunk(object):
1410 class binhunk(object):
1411 'A binary patch file.'
1411 'A binary patch file.'
1412 def __init__(self, lr, fname):
1412 def __init__(self, lr, fname):
1413 self.text = None
1413 self.text = None
1414 self.delta = False
1414 self.delta = False
1415 self.hunk = ['GIT binary patch\n']
1415 self.hunk = ['GIT binary patch\n']
1416 self._fname = fname
1416 self._fname = fname
1417 self._read(lr)
1417 self._read(lr)
1418
1418
1419 def complete(self):
1419 def complete(self):
1420 return self.text is not None
1420 return self.text is not None
1421
1421
1422 def new(self, lines):
1422 def new(self, lines):
1423 if self.delta:
1423 if self.delta:
1424 return [applybindelta(self.text, ''.join(lines))]
1424 return [applybindelta(self.text, ''.join(lines))]
1425 return [self.text]
1425 return [self.text]
1426
1426
1427 def _read(self, lr):
1427 def _read(self, lr):
1428 def getline(lr, hunk):
1428 def getline(lr, hunk):
1429 l = lr.readline()
1429 l = lr.readline()
1430 hunk.append(l)
1430 hunk.append(l)
1431 return l.rstrip('\r\n')
1431 return l.rstrip('\r\n')
1432
1432
1433 size = 0
1433 size = 0
1434 while True:
1434 while True:
1435 line = getline(lr, self.hunk)
1435 line = getline(lr, self.hunk)
1436 if not line:
1436 if not line:
1437 raise PatchError(_('could not extract "%s" binary data')
1437 raise PatchError(_('could not extract "%s" binary data')
1438 % self._fname)
1438 % self._fname)
1439 if line.startswith('literal '):
1439 if line.startswith('literal '):
1440 size = int(line[8:].rstrip())
1440 size = int(line[8:].rstrip())
1441 break
1441 break
1442 if line.startswith('delta '):
1442 if line.startswith('delta '):
1443 size = int(line[6:].rstrip())
1443 size = int(line[6:].rstrip())
1444 self.delta = True
1444 self.delta = True
1445 break
1445 break
1446 dec = []
1446 dec = []
1447 line = getline(lr, self.hunk)
1447 line = getline(lr, self.hunk)
1448 while len(line) > 1:
1448 while len(line) > 1:
1449 l = line[0]
1449 l = line[0]
1450 if l <= 'Z' and l >= 'A':
1450 if l <= 'Z' and l >= 'A':
1451 l = ord(l) - ord('A') + 1
1451 l = ord(l) - ord('A') + 1
1452 else:
1452 else:
1453 l = ord(l) - ord('a') + 27
1453 l = ord(l) - ord('a') + 27
1454 try:
1454 try:
1455 dec.append(util.b85decode(line[1:])[:l])
1455 dec.append(util.b85decode(line[1:])[:l])
1456 except ValueError as e:
1456 except ValueError as e:
1457 raise PatchError(_('could not decode "%s" binary patch: %s')
1457 raise PatchError(_('could not decode "%s" binary patch: %s')
1458 % (self._fname, str(e)))
1458 % (self._fname, str(e)))
1459 line = getline(lr, self.hunk)
1459 line = getline(lr, self.hunk)
1460 text = zlib.decompress(''.join(dec))
1460 text = zlib.decompress(''.join(dec))
1461 if len(text) != size:
1461 if len(text) != size:
1462 raise PatchError(_('"%s" length is %d bytes, should be %d')
1462 raise PatchError(_('"%s" length is %d bytes, should be %d')
1463 % (self._fname, len(text), size))
1463 % (self._fname, len(text), size))
1464 self.text = text
1464 self.text = text
1465
1465
1466 def parsefilename(str):
1466 def parsefilename(str):
1467 # --- filename \t|space stuff
1467 # --- filename \t|space stuff
1468 s = str[4:].rstrip('\r\n')
1468 s = str[4:].rstrip('\r\n')
1469 i = s.find('\t')
1469 i = s.find('\t')
1470 if i < 0:
1470 if i < 0:
1471 i = s.find(' ')
1471 i = s.find(' ')
1472 if i < 0:
1472 if i < 0:
1473 return s
1473 return s
1474 return s[:i]
1474 return s[:i]
1475
1475
1476 def reversehunks(hunks):
1476 def reversehunks(hunks):
1477 '''reverse the signs in the hunks given as argument
1477 '''reverse the signs in the hunks given as argument
1478
1478
1479 This function operates on hunks coming out of patch.filterpatch, that is
1479 This function operates on hunks coming out of patch.filterpatch, that is
1480 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1480 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1481
1481
1482 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1482 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1483 ... --- a/folder1/g
1483 ... --- a/folder1/g
1484 ... +++ b/folder1/g
1484 ... +++ b/folder1/g
1485 ... @@ -1,7 +1,7 @@
1485 ... @@ -1,7 +1,7 @@
1486 ... +firstline
1486 ... +firstline
1487 ... c
1487 ... c
1488 ... 1
1488 ... 1
1489 ... 2
1489 ... 2
1490 ... + 3
1490 ... + 3
1491 ... -4
1491 ... -4
1492 ... 5
1492 ... 5
1493 ... d
1493 ... d
1494 ... +lastline"""
1494 ... +lastline"""
1495 >>> hunks = parsepatch(rawpatch)
1495 >>> hunks = parsepatch(rawpatch)
1496 >>> hunkscomingfromfilterpatch = []
1496 >>> hunkscomingfromfilterpatch = []
1497 >>> for h in hunks:
1497 >>> for h in hunks:
1498 ... hunkscomingfromfilterpatch.append(h)
1498 ... hunkscomingfromfilterpatch.append(h)
1499 ... hunkscomingfromfilterpatch.extend(h.hunks)
1499 ... hunkscomingfromfilterpatch.extend(h.hunks)
1500
1500
1501 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1501 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1502 >>> from . import util
1502 >>> from . import util
1503 >>> fp = util.stringio()
1503 >>> fp = util.stringio()
1504 >>> for c in reversedhunks:
1504 >>> for c in reversedhunks:
1505 ... c.write(fp)
1505 ... c.write(fp)
1506 >>> fp.seek(0)
1506 >>> fp.seek(0)
1507 >>> reversedpatch = fp.read()
1507 >>> reversedpatch = fp.read()
1508 >>> print reversedpatch
1508 >>> print reversedpatch
1509 diff --git a/folder1/g b/folder1/g
1509 diff --git a/folder1/g b/folder1/g
1510 --- a/folder1/g
1510 --- a/folder1/g
1511 +++ b/folder1/g
1511 +++ b/folder1/g
1512 @@ -1,4 +1,3 @@
1512 @@ -1,4 +1,3 @@
1513 -firstline
1513 -firstline
1514 c
1514 c
1515 1
1515 1
1516 2
1516 2
1517 @@ -2,6 +1,6 @@
1517 @@ -2,6 +1,6 @@
1518 c
1518 c
1519 1
1519 1
1520 2
1520 2
1521 - 3
1521 - 3
1522 +4
1522 +4
1523 5
1523 5
1524 d
1524 d
1525 @@ -6,3 +5,2 @@
1525 @@ -6,3 +5,2 @@
1526 5
1526 5
1527 d
1527 d
1528 -lastline
1528 -lastline
1529
1529
1530 '''
1530 '''
1531
1531
1532 newhunks = []
1532 newhunks = []
1533 for c in hunks:
1533 for c in hunks:
1534 if util.safehasattr(c, 'reversehunk'):
1534 if util.safehasattr(c, 'reversehunk'):
1535 c = c.reversehunk()
1535 c = c.reversehunk()
1536 newhunks.append(c)
1536 newhunks.append(c)
1537 return newhunks
1537 return newhunks
1538
1538
1539 def parsepatch(originalchunks, maxcontext=None):
1539 def parsepatch(originalchunks, maxcontext=None):
1540 """patch -> [] of headers -> [] of hunks
1540 """patch -> [] of headers -> [] of hunks
1541
1541
1542 If maxcontext is not None, trim context lines if necessary.
1542 If maxcontext is not None, trim context lines if necessary.
1543
1543
1544 >>> rawpatch = '''diff --git a/folder1/g b/folder1/g
1544 >>> rawpatch = '''diff --git a/folder1/g b/folder1/g
1545 ... --- a/folder1/g
1545 ... --- a/folder1/g
1546 ... +++ b/folder1/g
1546 ... +++ b/folder1/g
1547 ... @@ -1,8 +1,10 @@
1547 ... @@ -1,8 +1,10 @@
1548 ... 1
1548 ... 1
1549 ... 2
1549 ... 2
1550 ... -3
1550 ... -3
1551 ... 4
1551 ... 4
1552 ... 5
1552 ... 5
1553 ... 6
1553 ... 6
1554 ... +6.1
1554 ... +6.1
1555 ... +6.2
1555 ... +6.2
1556 ... 7
1556 ... 7
1557 ... 8
1557 ... 8
1558 ... +9'''
1558 ... +9'''
1559 >>> out = util.stringio()
1559 >>> out = util.stringio()
1560 >>> headers = parsepatch([rawpatch], maxcontext=1)
1560 >>> headers = parsepatch([rawpatch], maxcontext=1)
1561 >>> for header in headers:
1561 >>> for header in headers:
1562 ... header.write(out)
1562 ... header.write(out)
1563 ... for hunk in header.hunks:
1563 ... for hunk in header.hunks:
1564 ... hunk.write(out)
1564 ... hunk.write(out)
1565 >>> print(out.getvalue())
1565 >>> print(out.getvalue())
1566 diff --git a/folder1/g b/folder1/g
1566 diff --git a/folder1/g b/folder1/g
1567 --- a/folder1/g
1567 --- a/folder1/g
1568 +++ b/folder1/g
1568 +++ b/folder1/g
1569 @@ -2,3 +2,2 @@
1569 @@ -2,3 +2,2 @@
1570 2
1570 2
1571 -3
1571 -3
1572 4
1572 4
1573 @@ -6,2 +5,4 @@
1573 @@ -6,2 +5,4 @@
1574 6
1574 6
1575 +6.1
1575 +6.1
1576 +6.2
1576 +6.2
1577 7
1577 7
1578 @@ -8,1 +9,2 @@
1578 @@ -8,1 +9,2 @@
1579 8
1579 8
1580 +9
1580 +9
1581 """
1581 """
1582 class parser(object):
1582 class parser(object):
1583 """patch parsing state machine"""
1583 """patch parsing state machine"""
1584 def __init__(self):
1584 def __init__(self):
1585 self.fromline = 0
1585 self.fromline = 0
1586 self.toline = 0
1586 self.toline = 0
1587 self.proc = ''
1587 self.proc = ''
1588 self.header = None
1588 self.header = None
1589 self.context = []
1589 self.context = []
1590 self.before = []
1590 self.before = []
1591 self.hunk = []
1591 self.hunk = []
1592 self.headers = []
1592 self.headers = []
1593
1593
1594 def addrange(self, limits):
1594 def addrange(self, limits):
1595 fromstart, fromend, tostart, toend, proc = limits
1595 fromstart, fromend, tostart, toend, proc = limits
1596 self.fromline = int(fromstart)
1596 self.fromline = int(fromstart)
1597 self.toline = int(tostart)
1597 self.toline = int(tostart)
1598 self.proc = proc
1598 self.proc = proc
1599
1599
1600 def addcontext(self, context):
1600 def addcontext(self, context):
1601 if self.hunk:
1601 if self.hunk:
1602 h = recordhunk(self.header, self.fromline, self.toline,
1602 h = recordhunk(self.header, self.fromline, self.toline,
1603 self.proc, self.before, self.hunk, context, maxcontext)
1603 self.proc, self.before, self.hunk, context, maxcontext)
1604 self.header.hunks.append(h)
1604 self.header.hunks.append(h)
1605 self.fromline += len(self.before) + h.removed
1605 self.fromline += len(self.before) + h.removed
1606 self.toline += len(self.before) + h.added
1606 self.toline += len(self.before) + h.added
1607 self.before = []
1607 self.before = []
1608 self.hunk = []
1608 self.hunk = []
1609 self.context = context
1609 self.context = context
1610
1610
1611 def addhunk(self, hunk):
1611 def addhunk(self, hunk):
1612 if self.context:
1612 if self.context:
1613 self.before = self.context
1613 self.before = self.context
1614 self.context = []
1614 self.context = []
1615 self.hunk = hunk
1615 self.hunk = hunk
1616
1616
1617 def newfile(self, hdr):
1617 def newfile(self, hdr):
1618 self.addcontext([])
1618 self.addcontext([])
1619 h = header(hdr)
1619 h = header(hdr)
1620 self.headers.append(h)
1620 self.headers.append(h)
1621 self.header = h
1621 self.header = h
1622
1622
1623 def addother(self, line):
1623 def addother(self, line):
1624 pass # 'other' lines are ignored
1624 pass # 'other' lines are ignored
1625
1625
1626 def finished(self):
1626 def finished(self):
1627 self.addcontext([])
1627 self.addcontext([])
1628 return self.headers
1628 return self.headers
1629
1629
1630 transitions = {
1630 transitions = {
1631 'file': {'context': addcontext,
1631 'file': {'context': addcontext,
1632 'file': newfile,
1632 'file': newfile,
1633 'hunk': addhunk,
1633 'hunk': addhunk,
1634 'range': addrange},
1634 'range': addrange},
1635 'context': {'file': newfile,
1635 'context': {'file': newfile,
1636 'hunk': addhunk,
1636 'hunk': addhunk,
1637 'range': addrange,
1637 'range': addrange,
1638 'other': addother},
1638 'other': addother},
1639 'hunk': {'context': addcontext,
1639 'hunk': {'context': addcontext,
1640 'file': newfile,
1640 'file': newfile,
1641 'range': addrange},
1641 'range': addrange},
1642 'range': {'context': addcontext,
1642 'range': {'context': addcontext,
1643 'hunk': addhunk},
1643 'hunk': addhunk},
1644 'other': {'other': addother},
1644 'other': {'other': addother},
1645 }
1645 }
1646
1646
1647 p = parser()
1647 p = parser()
1648 fp = stringio()
1648 fp = stringio()
1649 fp.write(''.join(originalchunks))
1649 fp.write(''.join(originalchunks))
1650 fp.seek(0)
1650 fp.seek(0)
1651
1651
1652 state = 'context'
1652 state = 'context'
1653 for newstate, data in scanpatch(fp):
1653 for newstate, data in scanpatch(fp):
1654 try:
1654 try:
1655 p.transitions[state][newstate](p, data)
1655 p.transitions[state][newstate](p, data)
1656 except KeyError:
1656 except KeyError:
1657 raise PatchError('unhandled transition: %s -> %s' %
1657 raise PatchError('unhandled transition: %s -> %s' %
1658 (state, newstate))
1658 (state, newstate))
1659 state = newstate
1659 state = newstate
1660 del fp
1660 del fp
1661 return p.finished()
1661 return p.finished()
1662
1662
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for remaining in range(strip, 0, -1):
        # advance past the next path separator
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow a run of '/' separators: 'a//b' is one level, not two
        # (slice, not index, so this also works on Python 3 bytes)
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1700
1700
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Synthesize a patchmeta record for a plain (non-git) patch.

    Git patches carry explicit per-file metadata; plain unified/context
    diffs do not, so this inspects the a/b file names of the first hunk
    plus what already exists in the working copy (via backend.exists())
    to decide which file is being patched and whether the operation is
    a creation ('ADD') or a deletion ('DELETE').

    Raises PatchError when neither a usable source nor destination file
    can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length hunk against /dev/null marks file creation (a side)
    # or removal (b side)
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer whichever side already exists in the working copy
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # nothing exists yet: fall back on the patch's own naming
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1755
1755
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push it back for the caller's next read
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            # collect header lines until the '---'/next 'diff' boundary
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # length-1 slice, not line[0]: on Python 3 indexing bytes
            # yields an int, which would never equal ' '
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1801
1801
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    start = 0
    try:
        # seekable input: remember our position so we can rescan in place
        start = lr.fp.tell()
        scanfp = lr.fp
    except IOError:
        # unseekable (e.g. a pipe): buffer everything so we can rewind
        scanfp = stringio(lr.fp.read())
    scanner = linereader(scanfp)
    scanner.push(firstline)
    gitpatches = readgitpatch(scanner)
    scanfp.seek(start)
    return gitpatches
1827
1827
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = format unknown yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk belonging to the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) queued before this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries (renames, mode
    # changes, ... with no hunks)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1923
1923
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the decompressed delta payload: two variable-length
    size headers followed by a stream of copy/insert opcodes. data is
    the source material copy opcodes refer to. Returns the
    reconstructed content.

    All byte accesses use length-1 slices rather than indexing so the
    code behaves identically for Python 2 str and Python 3 bytes
    (indexing bytes on Python 3 yields an int, which ord() rejects) --
    same pattern as the bytes[n:n + 1] fixes elsewhere in this module.
    """
    def deltahead(binchunk):
        """Return the length of the variable-length size field at the
        start of binchunk; a byte with the high bit set means more
        bytes follow."""
        i = 0
        while i < len(binchunk):
            if not (ord(binchunk[i:i + 1]) & 0x80):
                return i + 1
            i += 1
        return i
    out = ""
    # skip the two size headers (source size, result size)
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy opcode: bits 0-3 select which offset bytes follow,
            # bits 4-6 which size bytes follow (little-endian)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # an all-zero size field encodes 64 KiB (git convention)
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert opcode: the next cmd bytes are literal data
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1979
1979
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the worker, binding patchfile as the per-file patcher
    return _applydiff(
        ui, fp, patchfile, backend, store,
        strip=strip, prefix=prefix, eolmode=eolmode)
1992
1992
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Worker for applydiff(): drive iterhunks() events against backend.

    patcher is the class used to patch an individual file (patchfile in
    applydiff's case). Returns 0 on a clean apply, 1 when any hunk
    applied with fuzz, -1 when any file was rejected.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip - 1: the leading path component was already handled by
        # the a/ and b/ prefixes of git-style paths
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            # apply hunk to the file selected by the last 'file' event;
            # current_file may be None if opening that file failed
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize the metadata
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: create/delete/copy/chmod without
                # any content hunk
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # count the whole file as rejected but keep patching
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any
            # hunks touch them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2082
2082
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Touched file names parsed from the patcher's output are added to
    files (a set). Raises PatchError when the external command exits
    non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # initialized up front so a "with fuzz"/"FAILED" report emitted
        # before any "patching file" line cannot hit unbound locals
        pf = ''
        printed_file = False
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2124
2124
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply a patch through the given backend.

    patchobj may be a file name or an already-open file-like object.
    Touched paths are accumulated into files (a set). Returns True
    when the patch applied with fuzz, False on a clean apply; raises
    PatchError when any hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        # patchobj may be a path name or an open file object
        patchfp = open(patchobj, 'rb')
    except TypeError:
        patchfp = patchobj
    try:
        res = applydiff(ui, patchfp, backend, store, strip=strip,
                        prefix=prefix, eolmode=eolmode)
    finally:
        if patchfp != patchobj:
            patchfp.close()
        files.update(backend.close())
        store.close()
    if res < 0:
        raise PatchError(_('patch failed to apply'))
    return res > 0
2151
2151
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2158
2158
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply patchobj onto a repobackend built from ctx and store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2163
2163
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicit ui.patch program takes precedence over the builtin
    externalcmd = ui.config('ui', 'patch')
    if externalcmd:
        return _externalpatch(ui, repo, externalcmd, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2185
2185
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at
    patchpath."""
    backend = fsbackend(ui, repo.root)
    def pstrip(p):
        # strip - 1 accounts for the a/ and b/ prefixes of git paths
        return pathtransform(p, strip - 1, '')[1]
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pstrip(gp.path)
                    if gp.oldpath:
                        gp.oldpath = pstrip(gp.oldpath)
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                changed.add(gp.path)
                # a rename touches its source as well
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
    return changed
2206
2206
class GitDiffRequired(Exception):
    # NOTE(review): presumably raised when a change can only be
    # expressed in git's extended diff format -- inferred from the
    # name; confirm at the raise sites elsewhere in this module.
    pass
2209
2209
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# legacy alias kept for callers that use the historical name
diffopts = diffallopts
2216
2216
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers

    Command-line values in ``opts`` take precedence over config entries
    read from ``section`` of the ui configuration.
    '''
    # Resolve one option: prefer a set command-line value, then a
    # forced value under HGPLAIN (if requested), then the config file.
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            # hlen stays None (index header disabled) for 'none' and for
            # unrecognized/out-of-range values; a warning is emitted for
            # the latter.
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the result, even after a warning
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        # --binary on the command line overrides the nobinary config
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2297
2297
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    hunksiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                          changes=changes, opts=opts,
                          losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy)
    for hdr, hunks in hunksiter:
        # flatten all hunk lines of this file into one text block
        pieces = []
        for hunkrange, hunklines in hunks:
            pieces.extend(hunklines)
        body = ''.join(pieces)
        # emit the header when there is content, or when the header
        # itself carries information beyond the single diff line
        if hdr and (body or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if body:
            yield body
2331
2331
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # default to diffing against the first working-directory parent
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    # Build a filectx getter that keeps a small (20 entry) LRU cache of
    # filelogs so repeated lookups of the same file avoid re-opening them.
    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    # nothing to diff
    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    # collect copy/rename information when the output format can use it
    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # NOTE(review): iteritems() is Python-2-only; needs porting for py3.
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # NOTE(review): deleting from `copy` while iterating items() relies on
    # items() returning a list (Python 2); on Python 3 this raises
    # RuntimeError — confirm against the py3 porting plan.
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    # produce the diff with the given (possibly adjusted) options
    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        # try plain format first; fall back to git format if any file
        # would lose data and losedatafn does not accept the loss
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2444
2444
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    func must produce diff text chunks (as diff() does); each line is
    matched against a prefix table to pick the color/effect label used
    by ui.write(). Tabs and trailing whitespace in changed lines get
    their own labels.
    '''
    # prefixes recognized while inside a file header block
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # prefixes recognized inside hunk text
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False  # are we currently inside a file header block?
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline consumed by split()
                yield ('\n', '')
            if head:
                # a hunk start ('@') ends the header block
                if line.startswith('@'):
                    head = False
            else:
                # a line that is not hunk content starts a new header block
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split changed lines on tab runs so tabs can be
                        # labeled separately
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit unlabeled
                yield (line, '')
            if line != stripline:
                # the stripped trailing whitespace, labeled on its own
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2498
2498
def diffui(*args, **kwargs):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # simply route diff() output through the labeling generator
    return difflabel(diff, *args, **kwargs)
2502
2502
2503 def _filepairs(modified, added, removed, copy, opts):
2503 def _filepairs(modified, added, removed, copy, opts):
2504 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2504 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2505 before and f2 is the the name after. For added files, f1 will be None,
2505 before and f2 is the the name after. For added files, f1 will be None,
2506 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2506 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2507 or 'rename' (the latter two only if opts.git is set).'''
2507 or 'rename' (the latter two only if opts.git is set).'''
2508 gone = set()
2508 gone = set()
2509
2509
2510 copyto = dict([(v, k) for k, v in copy.items()])
2510 copyto = dict([(v, k) for k, v in copy.items()])
2511
2511
2512 addedset, removedset = set(added), set(removed)
2512 addedset, removedset = set(added), set(removed)
2513
2513
2514 for f in sorted(modified + added + removed):
2514 for f in sorted(modified + added + removed):
2515 copyop = None
2515 copyop = None
2516 f1, f2 = f, f
2516 f1, f2 = f, f
2517 if f in addedset:
2517 if f in addedset:
2518 f1 = None
2518 f1 = None
2519 if f in copy:
2519 if f in copy:
2520 if opts.git:
2520 if opts.git:
2521 f1 = copy[f]
2521 f1 = copy[f]
2522 if f1 in removedset and f1 not in gone:
2522 if f1 in removedset and f1 not in gone:
2523 copyop = 'rename'
2523 copyop = 'rename'
2524 gone.add(f1)
2524 gone.add(f1)
2525 else:
2525 else:
2526 copyop = 'copy'
2526 copyop = 'copy'
2527 elif f in removedset:
2527 elif f in removedset:
2528 f2 = None
2528 f2 = None
2529 if opts.git:
2529 if opts.git:
2530 # have we already reported a copy above?
2530 # have we already reported a copy above?
2531 if (f in copyto and copyto[f] in addedset
2531 if (f in copyto and copyto[f] in addedset
2532 and copy[copyto[f]] == f):
2532 and copy[copyto[f]] == f):
2533 continue
2533 continue
2534 yield f1, f2, copyop
2534 yield f1, f2, copyop
2535
2535
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    Yields (header, hunks) per file pair, where header is a list of
    header lines and hunks is whatever mdiff produces for the content.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    # git-style blob id: sha1 over "blob <len>\0<content>"
    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    # plain (non-git) "diff -r REV ... FILE" header line
    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # flag character -> git file mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: every incoming path must live under relroot
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        # in plain format, report anything the format cannot represent
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # strip relroot, prepend display prefix
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        # fctx.is  | diffopts                | what to   | is fctx.data()
        # binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes      | no   no       no  *     | summary   | no
        # yes      | no   no       yes *     | base85    | yes
        # yes      | no   yes      no  *     | summary   | no
        # yes      | no   yes      yes 0     | summary   | no
        # yes      | no   yes      yes >0    | summary   | semi [1]
        # yes      | yes  *        *   *     | text diff | yes
        # no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # binary content in base85 form, with the git index header
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                # truncated-hash index header, as configured
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2691
2691
def diffstatsum(stats):
    '''Aggregate per-file diffstat tuples into whole-diff totals.

    ``stats`` is an iterable of (filename, added, removed, isbinary)
    tuples. Returns (widest filename display width, largest per-file
    change count, total additions, total removals, any-binary flag).
    '''
    widest = 0
    biggest = 0
    added = removed = 0
    sawbinary = False
    for name, plus, minus, isbin in stats:
        namewidth = encoding.colwidth(name)
        if namewidth > widest:
            widest = namewidth
        changes = plus + minus
        if changes > biggest:
            biggest = changes
        added += plus
        removed += minus
        if isbin:
            sawbinary = True
    return widest, biggest, added, removed, sawbinary
2702
2702
def diffstatdata(lines):
    '''Parse unified diff ``lines`` into per-file statistics.

    Returns a list of (filename, adds, removes, isbinary) tuples, one
    per file touched by the diff.
    '''
    # raw string: a bare '\s' in a plain literal is an invalid escape
    # sequence on Python 3 (DeprecationWarning since 3.6, an error later)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
        if line.startswith('diff --git a/'):
            filename = gitre.search(line).group(2)
        elif line.startswith('diff -r'):
            # format: "diff -r ... -r ... filename"
            filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker: header of this file's diff is over
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2741
2741
def diffstat(lines, width=80):
    '''Render a classic diffstat summary of diff ``lines`` as one string.

    ``width`` bounds the total output width; the +/- histogram is scaled
    down when the largest per-file change count would overflow it.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must fit the largest number, or 'Bin' for binaries
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    # room left for the histogram, never squeezed below 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2779
2779
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # summary line, blank line, or a pure-binary entry: no graph
            yield (line, '')
        else:
            # split off the trailing +/- histogram and label its parts
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now