patch: add close() to abstractbackend...
Martin von Zweigbergk
r33156:b8ae289a default
@@ -1,2746 +1,2749 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    policy,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)

diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')

class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

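# Usage sketch: split() accepts any readline-capable or line-iterable stream
# and yields one file-like chunk per patch it finds.  The helper below is
# hypothetical and exists only to illustrate the call.
def _countpatches(fp):
    """return how many individual patches split() finds in fp"""
    return sum(1 for _chunk in split(fp))
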
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data

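# Usage sketch: extract() hands back a dict and, when a diff was found, the
# name of a temporary file holding it which the caller must unlink.  The
# helper below is hypothetical and only illustrates that contract.
def _extractexample(ui, fp):
    """report user/message found in fp and return True if it held a patch"""
    data = extract(ui, fp)
    tmpname = data.get('filename')
    try:
        ui.write('user: %s\n' % (data.get('user') or 'unknown'))
        ui.write('message: %s\n' % data.get('message', ''))
        return tmpname is not None
    finally:
        if tmpname:
            # per the docstring above, the temporary patch file is ours
            # to clean up
            os.unlink(tmpname)
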
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

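# Usage sketch: setmode() maps git file mode bits onto the (islink, isexec)
# pair described in the patchmeta docstring.  The mode values below are
# standard git modes; the helper itself is hypothetical.
def _patchmetaexample():
    gp = patchmeta('bin/tool')
    gp.setmode(0o100755)      # regular executable file
    islink, isexec = gp.mode
    assert not islink and isexec
    gp.setmode(0o120000)      # symlink
    islink, isexec = gp.mode
    assert islink and not isexec
    return gp
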
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

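# Usage sketch: readgitpatch() turns the extended git headers of a diff into
# patchmeta objects.  The sample diff text and the helper name are made up
# for illustration only.
def _readgitpatchexample():
    text = ('diff --git a/old.txt b/new.txt\n'
            'rename from old.txt\n'
            'rename to new.txt\n'
            '--- a/old.txt\n'
            '+++ b/new.txt\n')
    gp = readgitpatch(stringio(text))[0]
    return gp.op, gp.oldpath, gp.path   # ('RENAME', 'old.txt', 'new.txt')
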
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

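# Usage sketch: a parser can read a line, decide it belongs to the next
# consumer, and push it back.  Hypothetical helper, for illustration only.
def _linereaderexample():
    lr = linereader(stringio('first\nsecond\n'))
    peeked = lr.readline()          # 'first\n'
    lr.push(peeked)                 # put it back at the front of the buffer
    return list(lr)                 # ['first\n', 'second\n']
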
class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total is the total number of hunks for
        this file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

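# Sketch of a concrete backend (hypothetical, for illustration only): the
# interface above is filled in with a simple in-memory dict, including the
# close() hook, which concrete backends use to report what they touched.
class _dictbackend(abstractbackend):
    """toy backend keeping patched files in a dict {fname: (data, mode)}"""
    def __init__(self, ui):
        super(_dictbackend, self).__init__(ui)
        self.files = {}
        self.changed = set()

    def getfile(self, fname):
        # (None, None) signals a missing/deleted file, as documented above
        return self.files.get(fname, (None, None))

    def setfile(self, fname, data, mode, copysource):
        if data is None:
            # keep the existing content, only the mode changes
            data = self.files.get(fname, ('', None))[0]
        self.files[fname] = (data, mode)
        self.changed.add(fname)

    def unlink(self, fname):
        self.files.pop(fname, None)
        self.changed.add(fname)

    def exists(self, fname):
        return fname in self.files

    def close(self):
        # like the backends below, report the files that were touched
        return sorted(self.changed)
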
class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

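# Usage sketch: filestore keeps small files in memory and spills anything
# beyond maxsize to a temporary directory; close() removes that directory.
# The file names, sizes and helper name are made up for illustration.
def _filestoreexample():
    store = filestore(maxsize=16)
    store.setfile('small', 'tiny', (False, False))
    store.setfile('big', 'x' * 64, (False, True))    # too big, spills to disk
    try:
        data, mode, copied = store.getfile('big')
        return data == 'x' * 64 and mode == (False, True) and copied is None
    finally:
        store.close()    # removes the temporary spill directory, if any
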
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

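# Usage sketch of the descriptors above: unidesc captures the old/new start
# and optional length of a unified hunk header, contextdesc does the same for
# context-diff range lines.  The sample headers are made up.
def _hunkdescexample():
    m = unidesc.match('@@ -12,7 +12,9 @@')
    # -> ('12', '7', '12', '9'); a length group is None when omitted,
    # e.g. '@@ -1 +1 @@' gives ('1', None, '1', None)
    c = contextdesc.match('*** 5,10 ****')
    return m.group(1, 2, 3, 4), c.group(1, 2)    # ..., ('5', '10')
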
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level; for example, a file that has been deleted is a special file.
        # The user cannot change the content of the operation: for a deleted
        # file they either take the deletion or leave it, they cannot take
        # only some of it.
        # Newly added files are special if they are empty; they are not
        # special if they have some content, as we want to be able to change
        # that content.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
               any(self.special_re.match(h) for h in self.header)

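# Usage sketch: files() and filename() read the paths out of the first header
# line, and special() flags headers that can only be taken or left whole.
# The sample header lines and the helper name are made up.
def _headerexample():
    h = header(['diff --git a/oldname b/newname\n',
                'rename from oldname\n',
                'rename to newname\n'])
    return h.files(), h.filename(), h.special()
    # -> (['oldname', 'newname'], 'newname', True)
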
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
            tuple(self.header.files()),
            self.fromline,
            self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keeping other things
        unchanged.
        """
        m = {'+': '-', '-': '+'}
        hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

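# Usage sketch: countchanges() tallies the +/- lines of a hunk and
# reversehunk() flips them, as described in its docstring.  Context lines
# live in 'before'/'after', not in the hunk body.  Sample data is made up.
def _recordhunkexample():
    h = header(['diff --git a/f b/f\n'])
    before = [' context before\n']
    hunk = ['-old line\n', '+new line\n', '+another\n']
    after = [' context after\n']
    rh = recordhunk(h, 1, 1, '', before, hunk, after)
    reverse = rh.reversehunk()
    return (rh.added, rh.removed), reverse.hunk[0]   # (2, 1), '+old line\n'
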
993 def filterpatch(ui, headers, operation=None):
996 def filterpatch(ui, headers, operation=None):
994 """Interactively filter patch chunks into applied-only chunks"""
997 """Interactively filter patch chunks into applied-only chunks"""
995 if operation is None:
998 if operation is None:
996 operation = 'record'
999 operation = 'record'
997 messages = {
1000 messages = {
998 'multiple': {
1001 'multiple': {
999 'discard': _("discard change %d/%d to '%s'?"),
1002 'discard': _("discard change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1003 'record': _("record change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1004 'revert': _("revert change %d/%d to '%s'?"),
1002 }[operation],
1005 }[operation],
1003 'single': {
1006 'single': {
1004 'discard': _("discard this change to '%s'?"),
1007 'discard': _("discard this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1008 'record': _("record this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1009 'revert': _("revert this change to '%s'?"),
1007 }[operation],
1010 }[operation],
1008 'help': {
1011 'help': {
1009 'discard': _('[Ynesfdaq?]'
1012 'discard': _('[Ynesfdaq?]'
1010 '$$ &Yes, discard this change'
1013 '$$ &Yes, discard this change'
1011 '$$ &No, skip this change'
1014 '$$ &No, skip this change'
1012 '$$ &Edit this change manually'
1015 '$$ &Edit this change manually'
1013 '$$ &Skip remaining changes to this file'
1016 '$$ &Skip remaining changes to this file'
1014 '$$ Discard remaining changes to this &file'
1017 '$$ Discard remaining changes to this &file'
1015 '$$ &Done, skip remaining changes and files'
1018 '$$ &Done, skip remaining changes and files'
1016 '$$ Discard &all changes to all remaining files'
1019 '$$ Discard &all changes to all remaining files'
1017 '$$ &Quit, discarding no changes'
1020 '$$ &Quit, discarding no changes'
1018 '$$ &? (display help)'),
1021 '$$ &? (display help)'),
1019 'record': _('[Ynesfdaq?]'
1022 'record': _('[Ynesfdaq?]'
1020 '$$ &Yes, record this change'
1023 '$$ &Yes, record this change'
1021 '$$ &No, skip this change'
1024 '$$ &No, skip this change'
1022 '$$ &Edit this change manually'
1025 '$$ &Edit this change manually'
1023 '$$ &Skip remaining changes to this file'
1026 '$$ &Skip remaining changes to this file'
1024 '$$ Record remaining changes to this &file'
1027 '$$ Record remaining changes to this &file'
1025 '$$ &Done, skip remaining changes and files'
1028 '$$ &Done, skip remaining changes and files'
1026 '$$ Record &all changes to all remaining files'
1029 '$$ Record &all changes to all remaining files'
1027 '$$ &Quit, recording no changes'
1030 '$$ &Quit, recording no changes'
1028 '$$ &? (display help)'),
1031 '$$ &? (display help)'),
1029 'revert': _('[Ynesfdaq?]'
1032 'revert': _('[Ynesfdaq?]'
1030 '$$ &Yes, revert this change'
1033 '$$ &Yes, revert this change'
1031 '$$ &No, skip this change'
1034 '$$ &No, skip this change'
1032 '$$ &Edit this change manually'
1035 '$$ &Edit this change manually'
1033 '$$ &Skip remaining changes to this file'
1036 '$$ &Skip remaining changes to this file'
1034 '$$ Revert remaining changes to this &file'
1037 '$$ Revert remaining changes to this &file'
1035 '$$ &Done, skip remaining changes and files'
1038 '$$ &Done, skip remaining changes and files'
1036 '$$ Revert &all changes to all remaining files'
1039 '$$ Revert &all changes to all remaining files'
1037 '$$ &Quit, reverting no changes'
1040 '$$ &Quit, reverting no changes'
1038 '$$ &? (display help)')
1041 '$$ &? (display help)')
1039 }[operation]
1042 }[operation]
1040 }
1043 }
1041
1044
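# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# The 'help' strings above follow the ui.promptchoice() convention: prompt
# text first, then '$$'-separated choices, with '&' marking each choice's
# response key. ui.extractchoices() is the real parser; this is only a rough
# standalone rendering of the same convention:
raw = ('[Ynq?]'
       '$$ &Yes, record this change'
       '$$ &No, skip this change'
       '$$ &Quit, recording no changes'
       '$$ &? (display help)')
parts = raw.split('$$')
prompt_text = parts[0].strip()
choices = [p.strip() for p in parts[1:]]
keys = [c[c.index('&') + 1] for c in choices]
assert prompt_text == '[Ynq?]'
assert keys == ['Y', 'N', 'Q', '?']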
1042 def prompt(skipfile, skipall, query, chunk):
1045 def prompt(skipfile, skipall, query, chunk):
1043 """prompt query, and process base inputs
1046 """prompt query, and process base inputs
1044
1047
1045 - y/n for the rest of file
1048 - y/n for the rest of file
1046 - y/n for the rest
1049 - y/n for the rest
1047 - ? (help)
1050 - ? (help)
1048 - q (quit)
1051 - q (quit)
1049
1052
1050 Return True/False and possibly updated skipfile and skipall.
1053 Return True/False and possibly updated skipfile and skipall.
1051 """
1054 """
1052 newpatches = None
1055 newpatches = None
1053 if skipall is not None:
1056 if skipall is not None:
1054 return skipall, skipfile, skipall, newpatches
1057 return skipall, skipfile, skipall, newpatches
1055 if skipfile is not None:
1058 if skipfile is not None:
1056 return skipfile, skipfile, skipall, newpatches
1059 return skipfile, skipfile, skipall, newpatches
1057 while True:
1060 while True:
1058 resps = messages['help']
1061 resps = messages['help']
1059 r = ui.promptchoice("%s %s" % (query, resps))
1062 r = ui.promptchoice("%s %s" % (query, resps))
1060 ui.write("\n")
1063 ui.write("\n")
1061 if r == 8: # ?
1064 if r == 8: # ?
1062 for c, t in ui.extractchoices(resps)[1]:
1065 for c, t in ui.extractchoices(resps)[1]:
1063 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1066 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1064 continue
1067 continue
1065 elif r == 0: # yes
1068 elif r == 0: # yes
1066 ret = True
1069 ret = True
1067 elif r == 1: # no
1070 elif r == 1: # no
1068 ret = False
1071 ret = False
1069 elif r == 2: # Edit patch
1072 elif r == 2: # Edit patch
1070 if chunk is None:
1073 if chunk is None:
1071 ui.write(_('cannot edit patch for whole file'))
1074 ui.write(_('cannot edit patch for whole file'))
1072 ui.write("\n")
1075 ui.write("\n")
1073 continue
1076 continue
1074 if chunk.header.binary():
1077 if chunk.header.binary():
1075 ui.write(_('cannot edit patch for binary file'))
1078 ui.write(_('cannot edit patch for binary file'))
1076 ui.write("\n")
1079 ui.write("\n")
1077 continue
1080 continue
1078 # Patch comment based on the Git one (based on comment at end of
1081 # Patch comment based on the Git one (based on comment at end of
1079 # https://mercurial-scm.org/wiki/RecordExtension)
1082 # https://mercurial-scm.org/wiki/RecordExtension)
1080 phelp = '---' + _("""
1083 phelp = '---' + _("""
1081 To remove '-' lines, make them ' ' lines (context).
1084 To remove '-' lines, make them ' ' lines (context).
1082 To remove '+' lines, delete them.
1085 To remove '+' lines, delete them.
1083 Lines starting with # will be removed from the patch.
1086 Lines starting with # will be removed from the patch.
1084
1087
1085 If the patch applies cleanly, the edited hunk will immediately be
1088 If the patch applies cleanly, the edited hunk will immediately be
1086 added to the record list. If it does not apply cleanly, a rejects
1089 added to the record list. If it does not apply cleanly, a rejects
1087 file will be generated: you can use that when you try again. If
1090 file will be generated: you can use that when you try again. If
1088 all lines of the hunk are removed, then the edit is aborted and
1091 all lines of the hunk are removed, then the edit is aborted and
1089 the hunk is left unchanged.
1092 the hunk is left unchanged.
1090 """)
1093 """)
1091 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1094 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1092 suffix=".diff", text=True)
1095 suffix=".diff", text=True)
1093 ncpatchfp = None
1096 ncpatchfp = None
1094 try:
1097 try:
1095 # Write the initial patch
1098 # Write the initial patch
1096 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1099 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1097 chunk.header.write(f)
1100 chunk.header.write(f)
1098 chunk.write(f)
1101 chunk.write(f)
1099 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1102 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1100 f.close()
1103 f.close()
1101 # Start the editor and wait for it to complete
1104 # Start the editor and wait for it to complete
1102 editor = ui.geteditor()
1105 editor = ui.geteditor()
1103 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1106 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1104 environ={'HGUSER': ui.username()},
1107 environ={'HGUSER': ui.username()},
1105 blockedtag='filterpatch')
1108 blockedtag='filterpatch')
1106 if ret != 0:
1109 if ret != 0:
1107 ui.warn(_("editor exited with exit code %d\n") % ret)
1110 ui.warn(_("editor exited with exit code %d\n") % ret)
1108 continue
1111 continue
1109 # Remove comment lines
1112 # Remove comment lines
1110 patchfp = open(patchfn)
1113 patchfp = open(patchfn)
1111 ncpatchfp = stringio()
1114 ncpatchfp = stringio()
1112 for line in util.iterfile(patchfp):
1115 for line in util.iterfile(patchfp):
1113 if not line.startswith('#'):
1116 if not line.startswith('#'):
1114 ncpatchfp.write(line)
1117 ncpatchfp.write(line)
1115 patchfp.close()
1118 patchfp.close()
1116 ncpatchfp.seek(0)
1119 ncpatchfp.seek(0)
1117 newpatches = parsepatch(ncpatchfp)
1120 newpatches = parsepatch(ncpatchfp)
1118 finally:
1121 finally:
1119 os.unlink(patchfn)
1122 os.unlink(patchfn)
1120 del ncpatchfp
1123 del ncpatchfp
1121 # Signal that the chunk shouldn't be applied as-is, but
1124 # Signal that the chunk shouldn't be applied as-is, but
1122 # provide the new patch to be used instead.
1125 # provide the new patch to be used instead.
1123 ret = False
1126 ret = False
1124 elif r == 3: # Skip
1127 elif r == 3: # Skip
1125 ret = skipfile = False
1128 ret = skipfile = False
1126 elif r == 4: # file (Record remaining)
1129 elif r == 4: # file (Record remaining)
1127 ret = skipfile = True
1130 ret = skipfile = True
1128 elif r == 5: # done, skip remaining
1131 elif r == 5: # done, skip remaining
1129 ret = skipall = False
1132 ret = skipall = False
1130 elif r == 6: # all
1133 elif r == 6: # all
1131 ret = skipall = True
1134 ret = skipall = True
1132 elif r == 7: # quit
1135 elif r == 7: # quit
1133 raise error.Abort(_('user quit'))
1136 raise error.Abort(_('user quit'))
1134 return ret, skipfile, skipall, newpatches
1137 return ret, skipfile, skipall, newpatches
1135
1138
1136 seen = set()
1139 seen = set()
1137 applied = {} # 'filename' -> [] of chunks
1140 applied = {} # 'filename' -> [] of chunks
1138 skipfile, skipall = None, None
1141 skipfile, skipall = None, None
1139 pos, total = 1, sum(len(h.hunks) for h in headers)
1142 pos, total = 1, sum(len(h.hunks) for h in headers)
1140 for h in headers:
1143 for h in headers:
1141 pos += len(h.hunks)
1144 pos += len(h.hunks)
1142 skipfile = None
1145 skipfile = None
1143 fixoffset = 0
1146 fixoffset = 0
1144 hdr = ''.join(h.header)
1147 hdr = ''.join(h.header)
1145 if hdr in seen:
1148 if hdr in seen:
1146 continue
1149 continue
1147 seen.add(hdr)
1150 seen.add(hdr)
1148 if skipall is None:
1151 if skipall is None:
1149 h.pretty(ui)
1152 h.pretty(ui)
1150 msg = (_('examine changes to %s?') %
1153 msg = (_('examine changes to %s?') %
1151 _(' and ').join("'%s'" % f for f in h.files()))
1154 _(' and ').join("'%s'" % f for f in h.files()))
1152 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1155 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1153 if not r:
1156 if not r:
1154 continue
1157 continue
1155 applied[h.filename()] = [h]
1158 applied[h.filename()] = [h]
1156 if h.allhunks():
1159 if h.allhunks():
1157 applied[h.filename()] += h.hunks
1160 applied[h.filename()] += h.hunks
1158 continue
1161 continue
1159 for i, chunk in enumerate(h.hunks):
1162 for i, chunk in enumerate(h.hunks):
1160 if skipfile is None and skipall is None:
1163 if skipfile is None and skipall is None:
1161 chunk.pretty(ui)
1164 chunk.pretty(ui)
1162 if total == 1:
1165 if total == 1:
1163 msg = messages['single'] % chunk.filename()
1166 msg = messages['single'] % chunk.filename()
1164 else:
1167 else:
1165 idx = pos - len(h.hunks) + i
1168 idx = pos - len(h.hunks) + i
1166 msg = messages['multiple'] % (idx, total, chunk.filename())
1169 msg = messages['multiple'] % (idx, total, chunk.filename())
1167 r, skipfile, skipall, newpatches = prompt(skipfile,
1170 r, skipfile, skipall, newpatches = prompt(skipfile,
1168 skipall, msg, chunk)
1171 skipall, msg, chunk)
1169 if r:
1172 if r:
1170 if fixoffset:
1173 if fixoffset:
1171 chunk = copy.copy(chunk)
1174 chunk = copy.copy(chunk)
1172 chunk.toline += fixoffset
1175 chunk.toline += fixoffset
1173 applied[chunk.filename()].append(chunk)
1176 applied[chunk.filename()].append(chunk)
1174 elif newpatches is not None:
1177 elif newpatches is not None:
1175 for newpatch in newpatches:
1178 for newpatch in newpatches:
1176 for newhunk in newpatch.hunks:
1179 for newhunk in newpatch.hunks:
1177 if fixoffset:
1180 if fixoffset:
1178 newhunk.toline += fixoffset
1181 newhunk.toline += fixoffset
1179 applied[newhunk.filename()].append(newhunk)
1182 applied[newhunk.filename()].append(newhunk)
1180 else:
1183 else:
1181 fixoffset += chunk.removed - chunk.added
1184 fixoffset += chunk.removed - chunk.added
1182 return (sum([h for h in applied.itervalues()
1185 return (sum([h for h in applied.itervalues()
1183 if h[0].special() or len(h) > 1], []), {})
1186 if h[0].special() or len(h) > 1], []), {})
1184 class hunk(object):
1187 class hunk(object):
1185 def __init__(self, desc, num, lr, context):
1188 def __init__(self, desc, num, lr, context):
1186 self.number = num
1189 self.number = num
1187 self.desc = desc
1190 self.desc = desc
1188 self.hunk = [desc]
1191 self.hunk = [desc]
1189 self.a = []
1192 self.a = []
1190 self.b = []
1193 self.b = []
1191 self.starta = self.lena = None
1194 self.starta = self.lena = None
1192 self.startb = self.lenb = None
1195 self.startb = self.lenb = None
1193 if lr is not None:
1196 if lr is not None:
1194 if context:
1197 if context:
1195 self.read_context_hunk(lr)
1198 self.read_context_hunk(lr)
1196 else:
1199 else:
1197 self.read_unified_hunk(lr)
1200 self.read_unified_hunk(lr)
1198
1201
1199 def getnormalized(self):
1202 def getnormalized(self):
1200 """Return a copy with line endings normalized to LF."""
1203 """Return a copy with line endings normalized to LF."""
1201
1204
1202 def normalize(lines):
1205 def normalize(lines):
1203 nlines = []
1206 nlines = []
1204 for line in lines:
1207 for line in lines:
1205 if line.endswith('\r\n'):
1208 if line.endswith('\r\n'):
1206 line = line[:-2] + '\n'
1209 line = line[:-2] + '\n'
1207 nlines.append(line)
1210 nlines.append(line)
1208 return nlines
1211 return nlines
1209
1212
1210 # Dummy object; it is rebuilt manually
1213 # Dummy object; it is rebuilt manually
1211 nh = hunk(self.desc, self.number, None, None)
1214 nh = hunk(self.desc, self.number, None, None)
1212 nh.number = self.number
1215 nh.number = self.number
1213 nh.desc = self.desc
1216 nh.desc = self.desc
1214 nh.hunk = self.hunk
1217 nh.hunk = self.hunk
1215 nh.a = normalize(self.a)
1218 nh.a = normalize(self.a)
1216 nh.b = normalize(self.b)
1219 nh.b = normalize(self.b)
1217 nh.starta = self.starta
1220 nh.starta = self.starta
1218 nh.startb = self.startb
1221 nh.startb = self.startb
1219 nh.lena = self.lena
1222 nh.lena = self.lena
1220 nh.lenb = self.lenb
1223 nh.lenb = self.lenb
1221 return nh
1224 return nh
1222
1225
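# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# getnormalized() above only rewrites CRLF endings in the a/b line lists; the
# same normalization on its own:
lines = ['keep lf\n', 'strip crlf\r\n', 'no eol at all']
normalized = [l[:-2] + '\n' if l.endswith('\r\n') else l for l in lines]
assert normalized == ['keep lf\n', 'strip crlf\n', 'no eol at all']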
1223 def read_unified_hunk(self, lr):
1226 def read_unified_hunk(self, lr):
1224 m = unidesc.match(self.desc)
1227 m = unidesc.match(self.desc)
1225 if not m:
1228 if not m:
1226 raise PatchError(_("bad hunk #%d") % self.number)
1229 raise PatchError(_("bad hunk #%d") % self.number)
1227 self.starta, self.lena, self.startb, self.lenb = m.groups()
1230 self.starta, self.lena, self.startb, self.lenb = m.groups()
1228 if self.lena is None:
1231 if self.lena is None:
1229 self.lena = 1
1232 self.lena = 1
1230 else:
1233 else:
1231 self.lena = int(self.lena)
1234 self.lena = int(self.lena)
1232 if self.lenb is None:
1235 if self.lenb is None:
1233 self.lenb = 1
1236 self.lenb = 1
1234 else:
1237 else:
1235 self.lenb = int(self.lenb)
1238 self.lenb = int(self.lenb)
1236 self.starta = int(self.starta)
1239 self.starta = int(self.starta)
1237 self.startb = int(self.startb)
1240 self.startb = int(self.startb)
1238 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1241 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1239 self.b)
1242 self.b)
1240 # if we hit eof before finishing out the hunk, the last line will
1243 # if we hit eof before finishing out the hunk, the last line will
1241 # be zero length. Let's try to fix it up.
1244 # be zero length. Let's try to fix it up.
1242 while len(self.hunk[-1]) == 0:
1245 while len(self.hunk[-1]) == 0:
1243 del self.hunk[-1]
1246 del self.hunk[-1]
1244 del self.a[-1]
1247 del self.a[-1]
1245 del self.b[-1]
1248 del self.b[-1]
1246 self.lena -= 1
1249 self.lena -= 1
1247 self.lenb -= 1
1250 self.lenb -= 1
1248 self._fixnewline(lr)
1251 self._fixnewline(lr)
1249
1252
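# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# read_unified_hunk() above relies on the module-level 'unidesc' pattern
# (defined earlier in this file, not shown here) to pull the four numbers out
# of a unified hunk header, defaulting each length to 1 when the ",len" part
# is omitted. The regex below is the editor's own stand-in, not 'unidesc':
import re
hdr = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
m = hdr.match('@@ -3 +3,2 @@')
starta, lena, startb, lenb = m.groups()
lena = 1 if lena is None else int(lena)
lenb = 1 if lenb is None else int(lenb)
assert (int(starta), lena, int(startb), lenb) == (3, 1, 3, 2)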
1250 def read_context_hunk(self, lr):
1253 def read_context_hunk(self, lr):
1251 self.desc = lr.readline()
1254 self.desc = lr.readline()
1252 m = contextdesc.match(self.desc)
1255 m = contextdesc.match(self.desc)
1253 if not m:
1256 if not m:
1254 raise PatchError(_("bad hunk #%d") % self.number)
1257 raise PatchError(_("bad hunk #%d") % self.number)
1255 self.starta, aend = m.groups()
1258 self.starta, aend = m.groups()
1256 self.starta = int(self.starta)
1259 self.starta = int(self.starta)
1257 if aend is None:
1260 if aend is None:
1258 aend = self.starta
1261 aend = self.starta
1259 self.lena = int(aend) - self.starta
1262 self.lena = int(aend) - self.starta
1260 if self.starta:
1263 if self.starta:
1261 self.lena += 1
1264 self.lena += 1
1262 for x in xrange(self.lena):
1265 for x in xrange(self.lena):
1263 l = lr.readline()
1266 l = lr.readline()
1264 if l.startswith('---'):
1267 if l.startswith('---'):
1265 # lines addition, old block is empty
1268 # lines addition, old block is empty
1266 lr.push(l)
1269 lr.push(l)
1267 break
1270 break
1268 s = l[2:]
1271 s = l[2:]
1269 if l.startswith('- ') or l.startswith('! '):
1272 if l.startswith('- ') or l.startswith('! '):
1270 u = '-' + s
1273 u = '-' + s
1271 elif l.startswith(' '):
1274 elif l.startswith(' '):
1272 u = ' ' + s
1275 u = ' ' + s
1273 else:
1276 else:
1274 raise PatchError(_("bad hunk #%d old text line %d") %
1277 raise PatchError(_("bad hunk #%d old text line %d") %
1275 (self.number, x))
1278 (self.number, x))
1276 self.a.append(u)
1279 self.a.append(u)
1277 self.hunk.append(u)
1280 self.hunk.append(u)
1278
1281
1279 l = lr.readline()
1282 l = lr.readline()
1280 if l.startswith('\ '):
1283 if l.startswith('\ '):
1281 s = self.a[-1][:-1]
1284 s = self.a[-1][:-1]
1282 self.a[-1] = s
1285 self.a[-1] = s
1283 self.hunk[-1] = s
1286 self.hunk[-1] = s
1284 l = lr.readline()
1287 l = lr.readline()
1285 m = contextdesc.match(l)
1288 m = contextdesc.match(l)
1286 if not m:
1289 if not m:
1287 raise PatchError(_("bad hunk #%d") % self.number)
1290 raise PatchError(_("bad hunk #%d") % self.number)
1288 self.startb, bend = m.groups()
1291 self.startb, bend = m.groups()
1289 self.startb = int(self.startb)
1292 self.startb = int(self.startb)
1290 if bend is None:
1293 if bend is None:
1291 bend = self.startb
1294 bend = self.startb
1292 self.lenb = int(bend) - self.startb
1295 self.lenb = int(bend) - self.startb
1293 if self.startb:
1296 if self.startb:
1294 self.lenb += 1
1297 self.lenb += 1
1295 hunki = 1
1298 hunki = 1
1296 for x in xrange(self.lenb):
1299 for x in xrange(self.lenb):
1297 l = lr.readline()
1300 l = lr.readline()
1298 if l.startswith('\ '):
1301 if l.startswith('\ '):
1299 # XXX: the only way to hit this is with an invalid line range.
1302 # XXX: the only way to hit this is with an invalid line range.
1300 # The no-eol marker is not counted in the line range, but I
1303 # The no-eol marker is not counted in the line range, but I
1301 # guess some diff(1) implementations out there behave differently.
1304 # guess some diff(1) implementations out there behave differently.
1302 s = self.b[-1][:-1]
1305 s = self.b[-1][:-1]
1303 self.b[-1] = s
1306 self.b[-1] = s
1304 self.hunk[hunki - 1] = s
1307 self.hunk[hunki - 1] = s
1305 continue
1308 continue
1306 if not l:
1309 if not l:
1307 # line deletions, new block is empty and we hit EOF
1310 # line deletions, new block is empty and we hit EOF
1308 lr.push(l)
1311 lr.push(l)
1309 break
1312 break
1310 s = l[2:]
1313 s = l[2:]
1311 if l.startswith('+ ') or l.startswith('! '):
1314 if l.startswith('+ ') or l.startswith('! '):
1312 u = '+' + s
1315 u = '+' + s
1313 elif l.startswith(' '):
1316 elif l.startswith(' '):
1314 u = ' ' + s
1317 u = ' ' + s
1315 elif len(self.b) == 0:
1318 elif len(self.b) == 0:
1316 # line deletions, new block is empty
1319 # line deletions, new block is empty
1317 lr.push(l)
1320 lr.push(l)
1318 break
1321 break
1319 else:
1322 else:
1320 raise PatchError(_("bad hunk #%d new text line %d") %
1323 raise PatchError(_("bad hunk #%d new text line %d") %
1321 (self.number, x))
1324 (self.number, x))
1322 self.b.append(s)
1325 self.b.append(s)
1323 while True:
1326 while True:
1324 if hunki >= len(self.hunk):
1327 if hunki >= len(self.hunk):
1325 h = ""
1328 h = ""
1326 else:
1329 else:
1327 h = self.hunk[hunki]
1330 h = self.hunk[hunki]
1328 hunki += 1
1331 hunki += 1
1329 if h == u:
1332 if h == u:
1330 break
1333 break
1331 elif h.startswith('-'):
1334 elif h.startswith('-'):
1332 continue
1335 continue
1333 else:
1336 else:
1334 self.hunk.insert(hunki - 1, u)
1337 self.hunk.insert(hunki - 1, u)
1335 break
1338 break
1336
1339
1337 if not self.a:
1340 if not self.a:
1338 # this happens when lines were only added to the hunk
1341 # this happens when lines were only added to the hunk
1339 for x in self.hunk:
1342 for x in self.hunk:
1340 if x.startswith('-') or x.startswith(' '):
1343 if x.startswith('-') or x.startswith(' '):
1341 self.a.append(x)
1344 self.a.append(x)
1342 if not self.b:
1345 if not self.b:
1343 # this happens when lines were only deleted from the hunk
1346 # this happens when lines were only deleted from the hunk
1344 for x in self.hunk:
1347 for x in self.hunk:
1345 if x.startswith('+') or x.startswith(' '):
1348 if x.startswith('+') or x.startswith(' '):
1346 self.b.append(x[1:])
1349 self.b.append(x[1:])
1347 # @@ -start,len +start,len @@
1350 # @@ -start,len +start,len @@
1348 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1351 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1349 self.startb, self.lenb)
1352 self.startb, self.lenb)
1350 self.hunk[0] = self.desc
1353 self.hunk[0] = self.desc
1351 self._fixnewline(lr)
1354 self._fixnewline(lr)
1352
1355
1353 def _fixnewline(self, lr):
1356 def _fixnewline(self, lr):
1354 l = lr.readline()
1357 l = lr.readline()
1355 if l.startswith('\ '):
1358 if l.startswith('\ '):
1356 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1359 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1357 else:
1360 else:
1358 lr.push(l)
1361 lr.push(l)
1359
1362
1360 def complete(self):
1363 def complete(self):
1361 return len(self.a) == self.lena and len(self.b) == self.lenb
1364 return len(self.a) == self.lena and len(self.b) == self.lenb
1362
1365
1363 def _fuzzit(self, old, new, fuzz, toponly):
1366 def _fuzzit(self, old, new, fuzz, toponly):
1364 # this removes context lines from the top and bottom of the 'old' and
1367 # this removes context lines from the top and bottom of the 'old' and
1365 # 'new' lists. It checks the hunk to make sure only context lines are
1368 # 'new' lists. It checks the hunk to make sure only context lines are
1366 # removed, and then returns the shortened lists of lines.
1369 # removed, and then returns the shortened lists of lines.
1367 fuzz = min(fuzz, len(old))
1370 fuzz = min(fuzz, len(old))
1368 if fuzz:
1371 if fuzz:
1369 top = 0
1372 top = 0
1370 bot = 0
1373 bot = 0
1371 hlen = len(self.hunk)
1374 hlen = len(self.hunk)
1372 for x in xrange(hlen - 1):
1375 for x in xrange(hlen - 1):
1373 # the hunk starts with the @@ line, so use x+1
1376 # the hunk starts with the @@ line, so use x+1
1374 if self.hunk[x + 1][0] == ' ':
1377 if self.hunk[x + 1][0] == ' ':
1375 top += 1
1378 top += 1
1376 else:
1379 else:
1377 break
1380 break
1378 if not toponly:
1381 if not toponly:
1379 for x in xrange(hlen - 1):
1382 for x in xrange(hlen - 1):
1380 if self.hunk[hlen - bot - 1][0] == ' ':
1383 if self.hunk[hlen - bot - 1][0] == ' ':
1381 bot += 1
1384 bot += 1
1382 else:
1385 else:
1383 break
1386 break
1384
1387
1385 bot = min(fuzz, bot)
1388 bot = min(fuzz, bot)
1386 top = min(fuzz, top)
1389 top = min(fuzz, top)
1387 return old[top:len(old) - bot], new[top:len(new) - bot], top
1390 return old[top:len(old) - bot], new[top:len(new) - bot], top
1388 return old, new, 0
1391 return old, new, 0
1389
1392
1390 def fuzzit(self, fuzz, toponly):
1393 def fuzzit(self, fuzz, toponly):
1391 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1394 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1392 oldstart = self.starta + top
1395 oldstart = self.starta + top
1393 newstart = self.startb + top
1396 newstart = self.startb + top
1394 # zero length hunk ranges already have their start decremented
1397 # zero length hunk ranges already have their start decremented
1395 if self.lena and oldstart > 0:
1398 if self.lena and oldstart > 0:
1396 oldstart -= 1
1399 oldstart -= 1
1397 if self.lenb and newstart > 0:
1400 if self.lenb and newstart > 0:
1398 newstart -= 1
1401 newstart -= 1
1399 return old, oldstart, new, newstart
1402 return old, oldstart, new, newstart
1400
1403
1401 class binhunk(object):
1404 class binhunk(object):
1402 'A binary patch file.'
1405 'A binary patch file.'
1403 def __init__(self, lr, fname):
1406 def __init__(self, lr, fname):
1404 self.text = None
1407 self.text = None
1405 self.delta = False
1408 self.delta = False
1406 self.hunk = ['GIT binary patch\n']
1409 self.hunk = ['GIT binary patch\n']
1407 self._fname = fname
1410 self._fname = fname
1408 self._read(lr)
1411 self._read(lr)
1409
1412
1410 def complete(self):
1413 def complete(self):
1411 return self.text is not None
1414 return self.text is not None
1412
1415
1413 def new(self, lines):
1416 def new(self, lines):
1414 if self.delta:
1417 if self.delta:
1415 return [applybindelta(self.text, ''.join(lines))]
1418 return [applybindelta(self.text, ''.join(lines))]
1416 return [self.text]
1419 return [self.text]
1417
1420
1418 def _read(self, lr):
1421 def _read(self, lr):
1419 def getline(lr, hunk):
1422 def getline(lr, hunk):
1420 l = lr.readline()
1423 l = lr.readline()
1421 hunk.append(l)
1424 hunk.append(l)
1422 return l.rstrip('\r\n')
1425 return l.rstrip('\r\n')
1423
1426
1424 size = 0
1427 size = 0
1425 while True:
1428 while True:
1426 line = getline(lr, self.hunk)
1429 line = getline(lr, self.hunk)
1427 if not line:
1430 if not line:
1428 raise PatchError(_('could not extract "%s" binary data')
1431 raise PatchError(_('could not extract "%s" binary data')
1429 % self._fname)
1432 % self._fname)
1430 if line.startswith('literal '):
1433 if line.startswith('literal '):
1431 size = int(line[8:].rstrip())
1434 size = int(line[8:].rstrip())
1432 break
1435 break
1433 if line.startswith('delta '):
1436 if line.startswith('delta '):
1434 size = int(line[6:].rstrip())
1437 size = int(line[6:].rstrip())
1435 self.delta = True
1438 self.delta = True
1436 break
1439 break
1437 dec = []
1440 dec = []
1438 line = getline(lr, self.hunk)
1441 line = getline(lr, self.hunk)
1439 while len(line) > 1:
1442 while len(line) > 1:
1440 l = line[0]
1443 l = line[0]
1441 if l <= 'Z' and l >= 'A':
1444 if l <= 'Z' and l >= 'A':
1442 l = ord(l) - ord('A') + 1
1445 l = ord(l) - ord('A') + 1
1443 else:
1446 else:
1444 l = ord(l) - ord('a') + 27
1447 l = ord(l) - ord('a') + 27
1445 try:
1448 try:
1446 dec.append(util.b85decode(line[1:])[:l])
1449 dec.append(util.b85decode(line[1:])[:l])
1447 except ValueError as e:
1450 except ValueError as e:
1448 raise PatchError(_('could not decode "%s" binary patch: %s')
1451 raise PatchError(_('could not decode "%s" binary patch: %s')
1449 % (self._fname, str(e)))
1452 % (self._fname, str(e)))
1450 line = getline(lr, self.hunk)
1453 line = getline(lr, self.hunk)
1451 text = zlib.decompress(''.join(dec))
1454 text = zlib.decompress(''.join(dec))
1452 if len(text) != size:
1455 if len(text) != size:
1453 raise PatchError(_('"%s" length is %d bytes, should be %d')
1456 raise PatchError(_('"%s" length is %d bytes, should be %d')
1454 % (self._fname, len(text), size))
1457 % (self._fname, len(text), size))
1455 self.text = text
1458 self.text = text
1456
1459
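# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# binhunk._read() above decodes a "GIT binary patch" section: a
# "literal <size>" (or "delta <size>") line, then base85-encoded lines whose
# first character gives the decoded byte count of that line ('A'-'Z' -> 1..26,
# 'a'-'z' -> 27..52), the payload being zlib-compressed. A Python 3 round trip
# of a tiny 'literal' body, assuming base64.b85encode uses the same alphabet
# as util.b85decode:
import base64
import zlib
payload = b'hello, binary world\n'
compressed = zlib.compress(payload)
lines = []
for i in range(0, len(compressed), 52):
    chunk = compressed[i:i + 52]
    n = len(chunk)
    lenchar = chr(ord('A') + n - 1) if n <= 26 else chr(ord('a') + n - 27)
    lines.append(lenchar + base64.b85encode(chunk).decode('ascii'))
# ...and decode it back the way _read() does:
dec = b''
for line in lines:
    c = line[0]
    n = ord(c) - ord('A') + 1 if 'A' <= c <= 'Z' else ord(c) - ord('a') + 27
    dec += base64.b85decode(line[1:])[:n]
assert zlib.decompress(dec) == payload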
1457 def parsefilename(str):
1460 def parsefilename(str):
1458 # --- filename \t|space stuff
1461 # --- filename \t|space stuff
1459 s = str[4:].rstrip('\r\n')
1462 s = str[4:].rstrip('\r\n')
1460 i = s.find('\t')
1463 i = s.find('\t')
1461 if i < 0:
1464 if i < 0:
1462 i = s.find(' ')
1465 i = s.find(' ')
1463 if i < 0:
1466 if i < 0:
1464 return s
1467 return s
1465 return s[:i]
1468 return s[:i]
1466
1469
1467 def reversehunks(hunks):
1470 def reversehunks(hunks):
1468 '''reverse the signs in the hunks given as argument
1471 '''reverse the signs in the hunks given as argument
1469
1472
1470 This function operates on hunks coming out of patch.filterpatch, that is
1473 This function operates on hunks coming out of patch.filterpatch, that is
1471 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1474 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1472
1475
1473 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1476 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1474 ... --- a/folder1/g
1477 ... --- a/folder1/g
1475 ... +++ b/folder1/g
1478 ... +++ b/folder1/g
1476 ... @@ -1,7 +1,7 @@
1479 ... @@ -1,7 +1,7 @@
1477 ... +firstline
1480 ... +firstline
1478 ... c
1481 ... c
1479 ... 1
1482 ... 1
1480 ... 2
1483 ... 2
1481 ... + 3
1484 ... + 3
1482 ... -4
1485 ... -4
1483 ... 5
1486 ... 5
1484 ... d
1487 ... d
1485 ... +lastline"""
1488 ... +lastline"""
1486 >>> hunks = parsepatch(rawpatch)
1489 >>> hunks = parsepatch(rawpatch)
1487 >>> hunkscomingfromfilterpatch = []
1490 >>> hunkscomingfromfilterpatch = []
1488 >>> for h in hunks:
1491 >>> for h in hunks:
1489 ... hunkscomingfromfilterpatch.append(h)
1492 ... hunkscomingfromfilterpatch.append(h)
1490 ... hunkscomingfromfilterpatch.extend(h.hunks)
1493 ... hunkscomingfromfilterpatch.extend(h.hunks)
1491
1494
1492 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1495 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1493 >>> from . import util
1496 >>> from . import util
1494 >>> fp = util.stringio()
1497 >>> fp = util.stringio()
1495 >>> for c in reversedhunks:
1498 >>> for c in reversedhunks:
1496 ... c.write(fp)
1499 ... c.write(fp)
1497 >>> fp.seek(0)
1500 >>> fp.seek(0)
1498 >>> reversedpatch = fp.read()
1501 >>> reversedpatch = fp.read()
1499 >>> print reversedpatch
1502 >>> print reversedpatch
1500 diff --git a/folder1/g b/folder1/g
1503 diff --git a/folder1/g b/folder1/g
1501 --- a/folder1/g
1504 --- a/folder1/g
1502 +++ b/folder1/g
1505 +++ b/folder1/g
1503 @@ -1,4 +1,3 @@
1506 @@ -1,4 +1,3 @@
1504 -firstline
1507 -firstline
1505 c
1508 c
1506 1
1509 1
1507 2
1510 2
1508 @@ -2,6 +1,6 @@
1511 @@ -2,6 +1,6 @@
1509 c
1512 c
1510 1
1513 1
1511 2
1514 2
1512 - 3
1515 - 3
1513 +4
1516 +4
1514 5
1517 5
1515 d
1518 d
1516 @@ -6,3 +5,2 @@
1519 @@ -6,3 +5,2 @@
1517 5
1520 5
1518 d
1521 d
1519 -lastline
1522 -lastline
1520
1523
1521 '''
1524 '''
1522
1525
1523 newhunks = []
1526 newhunks = []
1524 for c in hunks:
1527 for c in hunks:
1525 if util.safehasattr(c, 'reversehunk'):
1528 if util.safehasattr(c, 'reversehunk'):
1526 c = c.reversehunk()
1529 c = c.reversehunk()
1527 newhunks.append(c)
1530 newhunks.append(c)
1528 return newhunks
1531 return newhunks
1529
1532
1530 def parsepatch(originalchunks):
1533 def parsepatch(originalchunks):
1531 """patch -> [] of headers -> [] of hunks """
1534 """patch -> [] of headers -> [] of hunks """
1532 class parser(object):
1535 class parser(object):
1533 """patch parsing state machine"""
1536 """patch parsing state machine"""
1534 def __init__(self):
1537 def __init__(self):
1535 self.fromline = 0
1538 self.fromline = 0
1536 self.toline = 0
1539 self.toline = 0
1537 self.proc = ''
1540 self.proc = ''
1538 self.header = None
1541 self.header = None
1539 self.context = []
1542 self.context = []
1540 self.before = []
1543 self.before = []
1541 self.hunk = []
1544 self.hunk = []
1542 self.headers = []
1545 self.headers = []
1543
1546
1544 def addrange(self, limits):
1547 def addrange(self, limits):
1545 fromstart, fromend, tostart, toend, proc = limits
1548 fromstart, fromend, tostart, toend, proc = limits
1546 self.fromline = int(fromstart)
1549 self.fromline = int(fromstart)
1547 self.toline = int(tostart)
1550 self.toline = int(tostart)
1548 self.proc = proc
1551 self.proc = proc
1549
1552
1550 def addcontext(self, context):
1553 def addcontext(self, context):
1551 if self.hunk:
1554 if self.hunk:
1552 h = recordhunk(self.header, self.fromline, self.toline,
1555 h = recordhunk(self.header, self.fromline, self.toline,
1553 self.proc, self.before, self.hunk, context)
1556 self.proc, self.before, self.hunk, context)
1554 self.header.hunks.append(h)
1557 self.header.hunks.append(h)
1555 self.fromline += len(self.before) + h.removed
1558 self.fromline += len(self.before) + h.removed
1556 self.toline += len(self.before) + h.added
1559 self.toline += len(self.before) + h.added
1557 self.before = []
1560 self.before = []
1558 self.hunk = []
1561 self.hunk = []
1559 self.context = context
1562 self.context = context
1560
1563
1561 def addhunk(self, hunk):
1564 def addhunk(self, hunk):
1562 if self.context:
1565 if self.context:
1563 self.before = self.context
1566 self.before = self.context
1564 self.context = []
1567 self.context = []
1565 self.hunk = hunk
1568 self.hunk = hunk
1566
1569
1567 def newfile(self, hdr):
1570 def newfile(self, hdr):
1568 self.addcontext([])
1571 self.addcontext([])
1569 h = header(hdr)
1572 h = header(hdr)
1570 self.headers.append(h)
1573 self.headers.append(h)
1571 self.header = h
1574 self.header = h
1572
1575
1573 def addother(self, line):
1576 def addother(self, line):
1574 pass # 'other' lines are ignored
1577 pass # 'other' lines are ignored
1575
1578
1576 def finished(self):
1579 def finished(self):
1577 self.addcontext([])
1580 self.addcontext([])
1578 return self.headers
1581 return self.headers
1579
1582
1580 transitions = {
1583 transitions = {
1581 'file': {'context': addcontext,
1584 'file': {'context': addcontext,
1582 'file': newfile,
1585 'file': newfile,
1583 'hunk': addhunk,
1586 'hunk': addhunk,
1584 'range': addrange},
1587 'range': addrange},
1585 'context': {'file': newfile,
1588 'context': {'file': newfile,
1586 'hunk': addhunk,
1589 'hunk': addhunk,
1587 'range': addrange,
1590 'range': addrange,
1588 'other': addother},
1591 'other': addother},
1589 'hunk': {'context': addcontext,
1592 'hunk': {'context': addcontext,
1590 'file': newfile,
1593 'file': newfile,
1591 'range': addrange},
1594 'range': addrange},
1592 'range': {'context': addcontext,
1595 'range': {'context': addcontext,
1593 'hunk': addhunk},
1596 'hunk': addhunk},
1594 'other': {'other': addother},
1597 'other': {'other': addother},
1595 }
1598 }
1596
1599
1597 p = parser()
1600 p = parser()
1598 fp = stringio()
1601 fp = stringio()
1599 fp.write(''.join(originalchunks))
1602 fp.write(''.join(originalchunks))
1600 fp.seek(0)
1603 fp.seek(0)
1601
1604
1602 state = 'context'
1605 state = 'context'
1603 for newstate, data in scanpatch(fp):
1606 for newstate, data in scanpatch(fp):
1604 try:
1607 try:
1605 p.transitions[state][newstate](p, data)
1608 p.transitions[state][newstate](p, data)
1606 except KeyError:
1609 except KeyError:
1607 raise PatchError('unhandled transition: %s -> %s' %
1610 raise PatchError('unhandled transition: %s -> %s' %
1608 (state, newstate))
1611 (state, newstate))
1609 state = newstate
1612 state = newstate
1610 del fp
1613 del fp
1611 return p.finished()
1614 return p.finished()
1612
1615
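# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# parsepatch() above is a table-driven state machine: the current state picks
# a row of 'transitions', the event from scanpatch() picks the handler, and a
# missing (state, event) pair means the patch is malformed. The same dispatch
# shape, reduced to a toy:
events = [('file', 'F1'), ('hunk', 'H1'), ('context', 'C1'), ('hunk', 'H2')]
log = []
transitions = {
    'start': {'file': lambda d: log.append('file ' + d)},
    'file': {'hunk': lambda d: log.append('hunk ' + d)},
    'hunk': {'context': lambda d: log.append('context ' + d)},
    'context': {'hunk': lambda d: log.append('hunk ' + d)},
}
state = 'start'
for newstate, data in events:
    try:
        transitions[state][newstate](data)
    except KeyError:
        raise ValueError('unhandled transition: %s -> %s' % (state, newstate))
    state = newstate
assert log == ['file F1', 'hunk H1', 'context C1', 'hunk H2']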
1613 def pathtransform(path, strip, prefix):
1616 def pathtransform(path, strip, prefix):
1614 '''turn a path from a patch into a path suitable for the repository
1617 '''turn a path from a patch into a path suitable for the repository
1615
1618
1616 prefix, if not empty, is expected to be normalized with a / at the end.
1619 prefix, if not empty, is expected to be normalized with a / at the end.
1617
1620
1618 Returns (stripped components, path in repository).
1621 Returns (stripped components, path in repository).
1619
1622
1620 >>> pathtransform('a/b/c', 0, '')
1623 >>> pathtransform('a/b/c', 0, '')
1621 ('', 'a/b/c')
1624 ('', 'a/b/c')
1622 >>> pathtransform(' a/b/c ', 0, '')
1625 >>> pathtransform(' a/b/c ', 0, '')
1623 ('', ' a/b/c')
1626 ('', ' a/b/c')
1624 >>> pathtransform(' a/b/c ', 2, '')
1627 >>> pathtransform(' a/b/c ', 2, '')
1625 ('a/b/', 'c')
1628 ('a/b/', 'c')
1626 >>> pathtransform('a/b/c', 0, 'd/e/')
1629 >>> pathtransform('a/b/c', 0, 'd/e/')
1627 ('', 'd/e/a/b/c')
1630 ('', 'd/e/a/b/c')
1628 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1631 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1629 ('a//b/', 'd/e/c')
1632 ('a//b/', 'd/e/c')
1630 >>> pathtransform('a/b/c', 3, '')
1633 >>> pathtransform('a/b/c', 3, '')
1631 Traceback (most recent call last):
1634 Traceback (most recent call last):
1632 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1635 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1633 '''
1636 '''
1634 pathlen = len(path)
1637 pathlen = len(path)
1635 i = 0
1638 i = 0
1636 if strip == 0:
1639 if strip == 0:
1637 return '', prefix + path.rstrip()
1640 return '', prefix + path.rstrip()
1638 count = strip
1641 count = strip
1639 while count > 0:
1642 while count > 0:
1640 i = path.find('/', i)
1643 i = path.find('/', i)
1641 if i == -1:
1644 if i == -1:
1642 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1645 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1643 (count, strip, path))
1646 (count, strip, path))
1644 i += 1
1647 i += 1
1645 # consume '//' in the path
1648 # consume '//' in the path
1646 while i < pathlen - 1 and path[i] == '/':
1649 while i < pathlen - 1 and path[i] == '/':
1647 i += 1
1650 i += 1
1648 count -= 1
1651 count -= 1
1649 return path[:i].lstrip(), prefix + path[i:].rstrip()
1652 return path[:i].lstrip(), prefix + path[i:].rstrip()
1650
1653
1651 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1654 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1652 nulla = afile_orig == "/dev/null"
1655 nulla = afile_orig == "/dev/null"
1653 nullb = bfile_orig == "/dev/null"
1656 nullb = bfile_orig == "/dev/null"
1654 create = nulla and hunk.starta == 0 and hunk.lena == 0
1657 create = nulla and hunk.starta == 0 and hunk.lena == 0
1655 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1658 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1656 abase, afile = pathtransform(afile_orig, strip, prefix)
1659 abase, afile = pathtransform(afile_orig, strip, prefix)
1657 gooda = not nulla and backend.exists(afile)
1660 gooda = not nulla and backend.exists(afile)
1658 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1661 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1659 if afile == bfile:
1662 if afile == bfile:
1660 goodb = gooda
1663 goodb = gooda
1661 else:
1664 else:
1662 goodb = not nullb and backend.exists(bfile)
1665 goodb = not nullb and backend.exists(bfile)
1663 missing = not goodb and not gooda and not create
1666 missing = not goodb and not gooda and not create
1664
1667
1665 # some diff programs apparently produce patches where the afile is
1668 # some diff programs apparently produce patches where the afile is
1666 # not /dev/null, but afile starts with bfile
1669 # not /dev/null, but afile starts with bfile
1667 abasedir = afile[:afile.rfind('/') + 1]
1670 abasedir = afile[:afile.rfind('/') + 1]
1668 bbasedir = bfile[:bfile.rfind('/') + 1]
1671 bbasedir = bfile[:bfile.rfind('/') + 1]
1669 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1672 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1670 and hunk.starta == 0 and hunk.lena == 0):
1673 and hunk.starta == 0 and hunk.lena == 0):
1671 create = True
1674 create = True
1672 missing = False
1675 missing = False
1673
1676
1674 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1677 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1675 # diff is between a file and its backup. In this case, the original
1678 # diff is between a file and its backup. In this case, the original
1676 # file should be patched (see original mpatch code).
1679 # file should be patched (see original mpatch code).
1677 isbackup = (abase == bbase and bfile.startswith(afile))
1680 isbackup = (abase == bbase and bfile.startswith(afile))
1678 fname = None
1681 fname = None
1679 if not missing:
1682 if not missing:
1680 if gooda and goodb:
1683 if gooda and goodb:
1681 if isbackup:
1684 if isbackup:
1682 fname = afile
1685 fname = afile
1683 else:
1686 else:
1684 fname = bfile
1687 fname = bfile
1685 elif gooda:
1688 elif gooda:
1686 fname = afile
1689 fname = afile
1687
1690
1688 if not fname:
1691 if not fname:
1689 if not nullb:
1692 if not nullb:
1690 if isbackup:
1693 if isbackup:
1691 fname = afile
1694 fname = afile
1692 else:
1695 else:
1693 fname = bfile
1696 fname = bfile
1694 elif not nulla:
1697 elif not nulla:
1695 fname = afile
1698 fname = afile
1696 else:
1699 else:
1697 raise PatchError(_("undefined source and destination files"))
1700 raise PatchError(_("undefined source and destination files"))
1698
1701
1699 gp = patchmeta(fname)
1702 gp = patchmeta(fname)
1700 if create:
1703 if create:
1701 gp.op = 'ADD'
1704 gp.op = 'ADD'
1702 elif remove:
1705 elif remove:
1703 gp.op = 'DELETE'
1706 gp.op = 'DELETE'
1704 return gp
1707 return gp
1705
1708
1706 def scanpatch(fp):
1709 def scanpatch(fp):
1707 """like patch.iterhunks, but yield different events
1710 """like patch.iterhunks, but yield different events
1708
1711
1709 - ('file', [header_lines + fromfile + tofile])
1712 - ('file', [header_lines + fromfile + tofile])
1710 - ('context', [context_lines])
1713 - ('context', [context_lines])
1711 - ('hunk', [hunk_lines])
1714 - ('hunk', [hunk_lines])
1712 - ('range', (-start,len, +start,len, proc))
1715 - ('range', (-start,len, +start,len, proc))
1713 """
1716 """
1714 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1717 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1715 lr = linereader(fp)
1718 lr = linereader(fp)
1716
1719
1717 def scanwhile(first, p):
1720 def scanwhile(first, p):
1718 """scan lr while predicate holds"""
1721 """scan lr while predicate holds"""
1719 lines = [first]
1722 lines = [first]
1720 for line in iter(lr.readline, ''):
1723 for line in iter(lr.readline, ''):
1721 if p(line):
1724 if p(line):
1722 lines.append(line)
1725 lines.append(line)
1723 else:
1726 else:
1724 lr.push(line)
1727 lr.push(line)
1725 break
1728 break
1726 return lines
1729 return lines
1727
1730
1728 for line in iter(lr.readline, ''):
1731 for line in iter(lr.readline, ''):
1729 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1732 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1730 def notheader(line):
1733 def notheader(line):
1731 s = line.split(None, 1)
1734 s = line.split(None, 1)
1732 return not s or s[0] not in ('---', 'diff')
1735 return not s or s[0] not in ('---', 'diff')
1733 header = scanwhile(line, notheader)
1736 header = scanwhile(line, notheader)
1734 fromfile = lr.readline()
1737 fromfile = lr.readline()
1735 if fromfile.startswith('---'):
1738 if fromfile.startswith('---'):
1736 tofile = lr.readline()
1739 tofile = lr.readline()
1737 header += [fromfile, tofile]
1740 header += [fromfile, tofile]
1738 else:
1741 else:
1739 lr.push(fromfile)
1742 lr.push(fromfile)
1740 yield 'file', header
1743 yield 'file', header
1741 elif line[0] == ' ':
1744 elif line[0] == ' ':
1742 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1745 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1743 elif line[0] in '-+':
1746 elif line[0] in '-+':
1744 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1747 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1745 else:
1748 else:
1746 m = lines_re.match(line)
1749 m = lines_re.match(line)
1747 if m:
1750 if m:
1748 yield 'range', m.groups()
1751 yield 'range', m.groups()
1749 else:
1752 else:
1750 yield 'other', line
1753 yield 'other', line
1751
1754
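# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# scanpatch() above dispatches on the first characters of each line: a diff
# header opens a 'file' event, ' ' opens 'context', '-'/'+' open 'hunk', an
# "@@ ... @@" line is a 'range' and anything else is 'other'. A reduced,
# single-line classifier using the same prefixes (the real code also groups
# consecutive lines with scanwhile and handles '\'-continuation lines):
import re
range_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')

def classify(line):
    if line.startswith('diff --git a/') or line.startswith('diff -r '):
        return 'file'
    if line[0] == ' ':
        return 'context'
    if line[0] in '-+':
        return 'hunk'
    return 'range' if range_re.match(line) else 'other'

assert classify('diff --git a/f b/f\n') == 'file'
assert classify('@@ -1,2 +1,3 @@\n') == 'range'
assert classify('+added\n') == 'hunk'
assert classify(' context\n') == 'context'
assert classify('index 0000..1111\n') == 'other'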
1752 def scangitpatch(lr, firstline):
1755 def scangitpatch(lr, firstline):
1753 """
1756 """
1754 Git patches can emit:
1757 Git patches can emit:
1755 - rename a to b
1758 - rename a to b
1756 - change b
1759 - change b
1757 - copy a to c
1760 - copy a to c
1758 - change c
1761 - change c
1759
1762
1760 We cannot apply this sequence as-is: the renamed 'a' could not be
1763 We cannot apply this sequence as-is: the renamed 'a' could not be
1761 found because it would have been renamed already. And we cannot copy
1764 found because it would have been renamed already. And we cannot copy
1762 from 'b' instead because 'b' would have been changed already. So
1765 from 'b' instead because 'b' would have been changed already. So
1763 we scan the git patch for copy and rename commands so we can
1766 we scan the git patch for copy and rename commands so we can
1764 perform the copies ahead of time.
1767 perform the copies ahead of time.
1765 """
1768 """
1766 pos = 0
1769 pos = 0
1767 try:
1770 try:
1768 pos = lr.fp.tell()
1771 pos = lr.fp.tell()
1769 fp = lr.fp
1772 fp = lr.fp
1770 except IOError:
1773 except IOError:
1771 fp = stringio(lr.fp.read())
1774 fp = stringio(lr.fp.read())
1772 gitlr = linereader(fp)
1775 gitlr = linereader(fp)
1773 gitlr.push(firstline)
1776 gitlr.push(firstline)
1774 gitpatches = readgitpatch(gitlr)
1777 gitpatches = readgitpatch(gitlr)
1775 fp.seek(pos)
1778 fp.seek(pos)
1776 return gitpatches
1779 return gitpatches
1777
1780
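# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# The ordering problem from the docstring above, on a toy in-memory
# "filesystem": applying "rename a->b, modify b, copy a->c, modify c" in
# order would fail at the copy step because 'a' is already gone, which is why
# copy/rename sources are captured before any change is applied.
files = {'a': 'v1'}
wanted_copies = [('a', 'c')]              # (source, destination) pairs
snapshot = dict((src, files[src]) for src, dst in wanted_copies)
files['b'] = files.pop('a') + '+edit'     # rename a -> b, then modify b
files['c'] = snapshot['a'] + '+edit'      # copy a -> c from the snapshot
assert files == {'b': 'v1+edit', 'c': 'v1+edit'}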
1778 def iterhunks(fp):
1781 def iterhunks(fp):
1779 """Read a patch and yield the following events:
1782 """Read a patch and yield the following events:
1780 - ("file", afile, bfile, firsthunk): select a new target file.
1783 - ("file", afile, bfile, firsthunk): select a new target file.
1781 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1784 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1782 "file" event.
1785 "file" event.
1783 - ("git", gitchanges): current diff is in git format, gitchanges
1786 - ("git", gitchanges): current diff is in git format, gitchanges
1784 maps filenames to gitpatch records. Unique event.
1787 maps filenames to gitpatch records. Unique event.
1785 """
1788 """
1786 afile = ""
1789 afile = ""
1787 bfile = ""
1790 bfile = ""
1788 state = None
1791 state = None
1789 hunknum = 0
1792 hunknum = 0
1790 emitfile = newfile = False
1793 emitfile = newfile = False
1791 gitpatches = None
1794 gitpatches = None
1792
1795
1793 # our states
1796 # our states
1794 BFILE = 1
1797 BFILE = 1
1795 context = None
1798 context = None
1796 lr = linereader(fp)
1799 lr = linereader(fp)
1797
1800
1798 for x in iter(lr.readline, ''):
1801 for x in iter(lr.readline, ''):
1799 if state == BFILE and (
1802 if state == BFILE and (
1800 (not context and x[0] == '@')
1803 (not context and x[0] == '@')
1801 or (context is not False and x.startswith('***************'))
1804 or (context is not False and x.startswith('***************'))
1802 or x.startswith('GIT binary patch')):
1805 or x.startswith('GIT binary patch')):
1803 gp = None
1806 gp = None
1804 if (gitpatches and
1807 if (gitpatches and
1805 gitpatches[-1].ispatching(afile, bfile)):
1808 gitpatches[-1].ispatching(afile, bfile)):
1806 gp = gitpatches.pop()
1809 gp = gitpatches.pop()
1807 if x.startswith('GIT binary patch'):
1810 if x.startswith('GIT binary patch'):
1808 h = binhunk(lr, gp.path)
1811 h = binhunk(lr, gp.path)
1809 else:
1812 else:
1810 if context is None and x.startswith('***************'):
1813 if context is None and x.startswith('***************'):
1811 context = True
1814 context = True
1812 h = hunk(x, hunknum + 1, lr, context)
1815 h = hunk(x, hunknum + 1, lr, context)
1813 hunknum += 1
1816 hunknum += 1
1814 if emitfile:
1817 if emitfile:
1815 emitfile = False
1818 emitfile = False
1816 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1819 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1817 yield 'hunk', h
1820 yield 'hunk', h
1818 elif x.startswith('diff --git a/'):
1821 elif x.startswith('diff --git a/'):
1819 m = gitre.match(x.rstrip(' \r\n'))
1822 m = gitre.match(x.rstrip(' \r\n'))
1820 if not m:
1823 if not m:
1821 continue
1824 continue
1822 if gitpatches is None:
1825 if gitpatches is None:
1823 # scan whole input for git metadata
1826 # scan whole input for git metadata
1824 gitpatches = scangitpatch(lr, x)
1827 gitpatches = scangitpatch(lr, x)
1825 yield 'git', [g.copy() for g in gitpatches
1828 yield 'git', [g.copy() for g in gitpatches
1826 if g.op in ('COPY', 'RENAME')]
1829 if g.op in ('COPY', 'RENAME')]
1827 gitpatches.reverse()
1830 gitpatches.reverse()
1828 afile = 'a/' + m.group(1)
1831 afile = 'a/' + m.group(1)
1829 bfile = 'b/' + m.group(2)
1832 bfile = 'b/' + m.group(2)
1830 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1833 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1831 gp = gitpatches.pop()
1834 gp = gitpatches.pop()
1832 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1835 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1833 if not gitpatches:
1836 if not gitpatches:
1834 raise PatchError(_('failed to synchronize metadata for "%s"')
1837 raise PatchError(_('failed to synchronize metadata for "%s"')
1835 % afile[2:])
1838 % afile[2:])
1836 gp = gitpatches[-1]
1839 gp = gitpatches[-1]
1837 newfile = True
1840 newfile = True
1838 elif x.startswith('---'):
1841 elif x.startswith('---'):
1839 # check for a unified diff
1842 # check for a unified diff
1840 l2 = lr.readline()
1843 l2 = lr.readline()
1841 if not l2.startswith('+++'):
1844 if not l2.startswith('+++'):
1842 lr.push(l2)
1845 lr.push(l2)
1843 continue
1846 continue
1844 newfile = True
1847 newfile = True
1845 context = False
1848 context = False
1846 afile = parsefilename(x)
1849 afile = parsefilename(x)
1847 bfile = parsefilename(l2)
1850 bfile = parsefilename(l2)
1848 elif x.startswith('***'):
1851 elif x.startswith('***'):
1849 # check for a context diff
1852 # check for a context diff
1850 l2 = lr.readline()
1853 l2 = lr.readline()
1851 if not l2.startswith('---'):
1854 if not l2.startswith('---'):
1852 lr.push(l2)
1855 lr.push(l2)
1853 continue
1856 continue
1854 l3 = lr.readline()
1857 l3 = lr.readline()
1855 lr.push(l3)
1858 lr.push(l3)
1856 if not l3.startswith("***************"):
1859 if not l3.startswith("***************"):
1857 lr.push(l2)
1860 lr.push(l2)
1858 continue
1861 continue
1859 newfile = True
1862 newfile = True
1860 context = True
1863 context = True
1861 afile = parsefilename(x)
1864 afile = parsefilename(x)
1862 bfile = parsefilename(l2)
1865 bfile = parsefilename(l2)
1863
1866
1864 if newfile:
1867 if newfile:
1865 newfile = False
1868 newfile = False
1866 emitfile = True
1869 emitfile = True
1867 state = BFILE
1870 state = BFILE
1868 hunknum = 0
1871 hunknum = 0
1869
1872
1870 while gitpatches:
1873 while gitpatches:
1871 gp = gitpatches.pop()
1874 gp = gitpatches.pop()
1872 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1875 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1873
1876
1874 def applybindelta(binchunk, data):
1877 def applybindelta(binchunk, data):
1875 """Apply a binary delta hunk
1878 """Apply a binary delta hunk
1876 The algorithm used is the algorithm from git's patch-delta.c
1879 The algorithm used is the algorithm from git's patch-delta.c
1877 """
1880 """
1878 def deltahead(binchunk):
1881 def deltahead(binchunk):
1879 i = 0
1882 i = 0
1880 for c in binchunk:
1883 for c in binchunk:
1881 i += 1
1884 i += 1
1882 if not (ord(c) & 0x80):
1885 if not (ord(c) & 0x80):
1883 return i
1886 return i
1884 return i
1887 return i
1885 out = ""
1888 out = ""
1886 s = deltahead(binchunk)
1889 s = deltahead(binchunk)
1887 binchunk = binchunk[s:]
1890 binchunk = binchunk[s:]
1888 s = deltahead(binchunk)
1891 s = deltahead(binchunk)
1889 binchunk = binchunk[s:]
1892 binchunk = binchunk[s:]
1890 i = 0
1893 i = 0
1891 while i < len(binchunk):
1894 while i < len(binchunk):
1892 cmd = ord(binchunk[i])
1895 cmd = ord(binchunk[i])
1893 i += 1
1896 i += 1
1894 if (cmd & 0x80):
1897 if (cmd & 0x80):
1895 offset = 0
1898 offset = 0
1896 size = 0
1899 size = 0
1897 if (cmd & 0x01):
1900 if (cmd & 0x01):
1898 offset = ord(binchunk[i])
1901 offset = ord(binchunk[i])
1899 i += 1
1902 i += 1
1900 if (cmd & 0x02):
1903 if (cmd & 0x02):
1901 offset |= ord(binchunk[i]) << 8
1904 offset |= ord(binchunk[i]) << 8
1902 i += 1
1905 i += 1
1903 if (cmd & 0x04):
1906 if (cmd & 0x04):
1904 offset |= ord(binchunk[i]) << 16
1907 offset |= ord(binchunk[i]) << 16
1905 i += 1
1908 i += 1
1906 if (cmd & 0x08):
1909 if (cmd & 0x08):
1907 offset |= ord(binchunk[i]) << 24
1910 offset |= ord(binchunk[i]) << 24
1908 i += 1
1911 i += 1
1909 if (cmd & 0x10):
1912 if (cmd & 0x10):
1910 size = ord(binchunk[i])
1913 size = ord(binchunk[i])
1911 i += 1
1914 i += 1
1912 if (cmd & 0x20):
1915 if (cmd & 0x20):
1913 size |= ord(binchunk[i]) << 8
1916 size |= ord(binchunk[i]) << 8
1914 i += 1
1917 i += 1
1915 if (cmd & 0x40):
1918 if (cmd & 0x40):
1916 size |= ord(binchunk[i]) << 16
1919 size |= ord(binchunk[i]) << 16
1917 i += 1
1920 i += 1
1918 if size == 0:
1921 if size == 0:
1919 size = 0x10000
1922 size = 0x10000
1920 offset_end = offset + size
1923 offset_end = offset + size
1921 out += data[offset:offset_end]
1924 out += data[offset:offset_end]
1922 elif cmd != 0:
1925 elif cmd != 0:
1923 offset_end = i + cmd
1926 offset_end = i + cmd
1924 out += binchunk[i:offset_end]
1927 out += binchunk[i:offset_end]
1925 i += cmd
1928 i += cmd
1926 else:
1929 else:
1927 raise PatchError(_('unexpected delta opcode 0'))
1930 raise PatchError(_('unexpected delta opcode 0'))
1928 return out
1931 return out
1929
1932
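# --- Editor's illustrative sketch (not part of patch.py or this changeset) ---
# A hand-built delta for applybindelta() above (Python 2 byte strings, as in
# the rest of this module). The two leading varints give the source and
# target sizes; a command byte with the high bit set copies from the source,
# bits 0x01..0x08 selecting offset bytes and 0x10..0x40 size bytes; a command
# byte without the high bit inserts that many literal bytes. Here: sizes 11
# and 5, then one copy command 0x90 (0x80 | 0x10) with a single size byte of
# 5 and no offset bytes, i.e. "copy 5 bytes from offset 0":
source = 'hello world'                    # 11 bytes
delta = '\x0b\x05\x90\x05'
assert applybindelta(delta, source) == 'hello'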
1930 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1933 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1931 """Reads a patch from fp and tries to apply it.
1934 """Reads a patch from fp and tries to apply it.
1932
1935
1933 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1936 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1934 there was any fuzz.
1937 there was any fuzz.
1935
1938
1936 If 'eolmode' is 'strict', the patch content and patched file are
1939 If 'eolmode' is 'strict', the patch content and patched file are
1937 read in binary mode. Otherwise, line endings are ignored when
1940 read in binary mode. Otherwise, line endings are ignored when
1938 patching then normalized according to 'eolmode'.
1941 patching then normalized according to 'eolmode'.
1939 """
1942 """
1940 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1943 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1941 prefix=prefix, eolmode=eolmode)
1944 prefix=prefix, eolmode=eolmode)
1942
1945
1943 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1946 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1944 eolmode='strict'):
1947 eolmode='strict'):
1945
1948
1946 if prefix:
1949 if prefix:
1947 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1950 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1948 prefix)
1951 prefix)
1949 if prefix != '':
1952 if prefix != '':
1950 prefix += '/'
1953 prefix += '/'
1951 def pstrip(p):
1954 def pstrip(p):
1952 return pathtransform(p, strip - 1, prefix)[1]
1955 return pathtransform(p, strip - 1, prefix)[1]
1953
1956
1954 rejects = 0
1957 rejects = 0
1955 err = 0
1958 err = 0
1956 current_file = None
1959 current_file = None
1957
1960
1958 for state, values in iterhunks(fp):
1961 for state, values in iterhunks(fp):
1959 if state == 'hunk':
1962 if state == 'hunk':
1960 if not current_file:
1963 if not current_file:
1961 continue
1964 continue
1962 ret = current_file.apply(values)
1965 ret = current_file.apply(values)
1963 if ret > 0:
1966 if ret > 0:
1964 err = 1
1967 err = 1
1965 elif state == 'file':
1968 elif state == 'file':
1966 if current_file:
1969 if current_file:
1967 rejects += current_file.close()
1970 rejects += current_file.close()
1968 current_file = None
1971 current_file = None
1969 afile, bfile, first_hunk, gp = values
1972 afile, bfile, first_hunk, gp = values
1970 if gp:
1973 if gp:
1971 gp.path = pstrip(gp.path)
1974 gp.path = pstrip(gp.path)
1972 if gp.oldpath:
1975 if gp.oldpath:
1973 gp.oldpath = pstrip(gp.oldpath)
1976 gp.oldpath = pstrip(gp.oldpath)
1974 else:
1977 else:
1975 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1978 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1976 prefix)
1979 prefix)
1977 if gp.op == 'RENAME':
1980 if gp.op == 'RENAME':
1978 backend.unlink(gp.oldpath)
1981 backend.unlink(gp.oldpath)
1979 if not first_hunk:
1982 if not first_hunk:
1980 if gp.op == 'DELETE':
1983 if gp.op == 'DELETE':
1981 backend.unlink(gp.path)
1984 backend.unlink(gp.path)
1982 continue
1985 continue
1983 data, mode = None, None
1986 data, mode = None, None
1984 if gp.op in ('RENAME', 'COPY'):
1987 if gp.op in ('RENAME', 'COPY'):
1985 data, mode = store.getfile(gp.oldpath)[:2]
1988 data, mode = store.getfile(gp.oldpath)[:2]
1986 if data is None:
1989 if data is None:
1987 # This means that the old path does not exist
1990 # This means that the old path does not exist
1988 raise PatchError(_("source file '%s' does not exist")
1991 raise PatchError(_("source file '%s' does not exist")
1989 % gp.oldpath)
1992 % gp.oldpath)
1990 if gp.mode:
1993 if gp.mode:
1991 mode = gp.mode
1994 mode = gp.mode
1992 if gp.op == 'ADD':
1995 if gp.op == 'ADD':
1993 # Added files without content have no hunk and
1996 # Added files without content have no hunk and
1994 # must be created
1997 # must be created
1995 data = ''
1998 data = ''
1996 if data or mode:
1999 if data or mode:
1997 if (gp.op in ('ADD', 'RENAME', 'COPY')
2000 if (gp.op in ('ADD', 'RENAME', 'COPY')
1998 and backend.exists(gp.path)):
2001 and backend.exists(gp.path)):
1999 raise PatchError(_("cannot create %s: destination "
2002 raise PatchError(_("cannot create %s: destination "
2000 "already exists") % gp.path)
2003 "already exists") % gp.path)
2001 backend.setfile(gp.path, data, mode, gp.oldpath)
2004 backend.setfile(gp.path, data, mode, gp.oldpath)
2002 continue
2005 continue
2003 try:
2006 try:
2004 current_file = patcher(ui, gp, backend, store,
2007 current_file = patcher(ui, gp, backend, store,
2005 eolmode=eolmode)
2008 eolmode=eolmode)
2006 except PatchError as inst:
2009 except PatchError as inst:
2007 ui.warn(str(inst) + '\n')
2010 ui.warn(str(inst) + '\n')
2008 current_file = None
2011 current_file = None
2009 rejects += 1
2012 rejects += 1
2010 continue
2013 continue
2011 elif state == 'git':
2014 elif state == 'git':
2012 for gp in values:
2015 for gp in values:
2013 path = pstrip(gp.oldpath)
2016 path = pstrip(gp.oldpath)
2014 data, mode = backend.getfile(path)
2017 data, mode = backend.getfile(path)
2015 if data is None:
2018 if data is None:
2016 # The error ignored here will trigger a getfile()
2019 # The error ignored here will trigger a getfile()
2017 # error in a place more appropriate for error
2020 # error in a place more appropriate for error
2018 # handling, and will not interrupt the patching
2021 # handling, and will not interrupt the patching
2019 # process.
2022 # process.
2020 pass
2023 pass
2021 else:
2024 else:
2022 store.setfile(path, data, mode)
2025 store.setfile(path, data, mode)
2023 else:
2026 else:
2024 raise error.Abort(_('unsupported parser state: %s') % state)
2027 raise error.Abort(_('unsupported parser state: %s') % state)
2025
2028
2026 if current_file:
2029 if current_file:
2027 rejects += current_file.close()
2030 rejects += current_file.close()
2028
2031
2029 if rejects:
2032 if rejects:
2030 return -1
2033 return -1
2031 return err
2034 return err
2032
2035
2033 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2036 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2034 similarity):
2037 similarity):
2035 """use <patcher> to apply <patchname> to the working directory.
2038 """use <patcher> to apply <patchname> to the working directory.
2036 returns whether patch was applied with fuzz factor."""
2039 returns whether patch was applied with fuzz factor."""
2037
2040
2038 fuzz = False
2041 fuzz = False
2039 args = []
2042 args = []
2040 cwd = repo.root
2043 cwd = repo.root
2041 if cwd:
2044 if cwd:
2042 args.append('-d %s' % util.shellquote(cwd))
2045 args.append('-d %s' % util.shellquote(cwd))
2043 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2046 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2044 util.shellquote(patchname)))
2047 util.shellquote(patchname)))
2045 try:
2048 try:
2046 for line in util.iterfile(fp):
2049 for line in util.iterfile(fp):
2047 line = line.rstrip()
2050 line = line.rstrip()
2048 ui.note(line + '\n')
2051 ui.note(line + '\n')
2049 if line.startswith('patching file '):
2052 if line.startswith('patching file '):
2050 pf = util.parsepatchoutput(line)
2053 pf = util.parsepatchoutput(line)
2051 printed_file = False
2054 printed_file = False
2052 files.add(pf)
2055 files.add(pf)
2053 elif line.find('with fuzz') >= 0:
2056 elif line.find('with fuzz') >= 0:
2054 fuzz = True
2057 fuzz = True
2055 if not printed_file:
2058 if not printed_file:
2056 ui.warn(pf + '\n')
2059 ui.warn(pf + '\n')
2057 printed_file = True
2060 printed_file = True
2058 ui.warn(line + '\n')
2061 ui.warn(line + '\n')
2059 elif line.find('saving rejects to file') >= 0:
2062 elif line.find('saving rejects to file') >= 0:
2060 ui.warn(line + '\n')
2063 ui.warn(line + '\n')
2061 elif line.find('FAILED') >= 0:
2064 elif line.find('FAILED') >= 0:
2062 if not printed_file:
2065 if not printed_file:
2063 ui.warn(pf + '\n')
2066 ui.warn(pf + '\n')
2064 printed_file = True
2067 printed_file = True
2065 ui.warn(line + '\n')
2068 ui.warn(line + '\n')
2066 finally:
2069 finally:
2067 if files:
2070 if files:
2068 scmutil.marktouched(repo, files, similarity)
2071 scmutil.marktouched(repo, files, similarity)
2069 code = fp.close()
2072 code = fp.close()
2070 if code:
2073 if code:
2071 raise PatchError(_("patch command failed: %s") %
2074 raise PatchError(_("patch command failed: %s") %
2072 util.explainexit(code)[0])
2075 util.explainexit(code)[0])
2073 return fuzz
2076 return fuzz
2074
2077
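# Configuration sketch: patch() below only takes the _externalpatch() route
# when an external tool is configured; 'gpatch' is an example value, not a
# default. For instance, in an hgrc:
#
#   [ui]
#   patch = gpatch
#
# The tool is then invoked as "<patcher> -d <repo.root> -p<strip> < <patchname>"
# and its output is scanned for "with fuzz", "saving rejects to file" and
# "FAILED" markers to produce warnings and the fuzz return value.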
2075 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2078 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2076 eolmode='strict'):
2079 eolmode='strict'):
2077 if files is None:
2080 if files is None:
2078 files = set()
2081 files = set()
2079 if eolmode is None:
2082 if eolmode is None:
2080 eolmode = ui.config('patch', 'eol', 'strict')
2083 eolmode = ui.config('patch', 'eol', 'strict')
2081 if eolmode.lower() not in eolmodes:
2084 if eolmode.lower() not in eolmodes:
2082 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2085 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2083 eolmode = eolmode.lower()
2086 eolmode = eolmode.lower()
2084
2087
2085 store = filestore()
2088 store = filestore()
2086 try:
2089 try:
2087 fp = open(patchobj, 'rb')
2090 fp = open(patchobj, 'rb')
2088 except TypeError:
2091 except TypeError:
2089 fp = patchobj
2092 fp = patchobj
2090 try:
2093 try:
2091 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2094 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2092 eolmode=eolmode)
2095 eolmode=eolmode)
2093 finally:
2096 finally:
2094 if fp != patchobj:
2097 if fp != patchobj:
2095 fp.close()
2098 fp.close()
2096 files.update(backend.close())
2099 files.update(backend.close())
2097 store.close()
2100 store.close()
2098 if ret < 0:
2101 if ret < 0:
2099 raise PatchError(_('patch failed to apply'))
2102 raise PatchError(_('patch failed to apply'))
2100 return ret > 0
2103 return ret > 0
2101
2104
2102 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2105 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2103 eolmode='strict', similarity=0):
2106 eolmode='strict', similarity=0):
2104 """use builtin patch to apply <patchobj> to the working directory.
2107 """use builtin patch to apply <patchobj> to the working directory.
2105 returns whether patch was applied with fuzz factor."""
2108 returns whether patch was applied with fuzz factor."""
2106 backend = workingbackend(ui, repo, similarity)
2109 backend = workingbackend(ui, repo, similarity)
2107 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2110 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2108
2111
2109 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2112 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2110 eolmode='strict'):
2113 eolmode='strict'):
2111 backend = repobackend(ui, repo, ctx, store)
2114 backend = repobackend(ui, repo, ctx, store)
2112 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2115 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2113
2116
2114 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2117 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2115 similarity=0):
2118 similarity=0):
2116 """Apply <patchname> to the working directory.
2119 """Apply <patchname> to the working directory.
2117
2120
2118 'eolmode' specifies how end of lines should be handled. It can be:
2121 'eolmode' specifies how end of lines should be handled. It can be:
2119 - 'strict': inputs are read in binary mode, EOLs are preserved
2122 - 'strict': inputs are read in binary mode, EOLs are preserved
2120 - 'crlf': EOLs are ignored when patching and reset to CRLF
2123 - 'crlf': EOLs are ignored when patching and reset to CRLF
2121 - 'lf': EOLs are ignored when patching and reset to LF
2124 - 'lf': EOLs are ignored when patching and reset to LF
2122 - None: get it from user settings, default to 'strict'
2125 - None: get it from user settings, default to 'strict'
2123 'eolmode' is ignored when using an external patcher program.
2126 'eolmode' is ignored when using an external patcher program.
2124
2127
2125 Returns whether patch was applied with fuzz factor.
2128 Returns whether patch was applied with fuzz factor.
2126 """
2129 """
2127 patcher = ui.config('ui', 'patch')
2130 patcher = ui.config('ui', 'patch')
2128 if files is None:
2131 if files is None:
2129 files = set()
2132 files = set()
2130 if patcher:
2133 if patcher:
2131 return _externalpatch(ui, repo, patcher, patchname, strip,
2134 return _externalpatch(ui, repo, patcher, patchname, strip,
2132 files, similarity)
2135 files, similarity)
2133 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2136 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2134 similarity)
2137 similarity)
2135
2138
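# Usage sketch for patch(). The patch path and messages are illustrative;
# 'files' is filled in with the paths the patch touched, and the return
# value reports whether any hunk needed fuzz.
def _patchexample(ui, repo, patchpath='fix.patch'):
    files = set()
    try:
        fuzz = patch(ui, repo, patchpath, strip=1, files=files,
                     eolmode='strict')
    except PatchError as inst:
        ui.warn(_("patch failed: %s\n") % str(inst))
        return None
    if fuzz:
        ui.status(_("applied with fuzz; double-check the result\n"))
    return files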
2136 def changedfiles(ui, repo, patchpath, strip=1):
2139 def changedfiles(ui, repo, patchpath, strip=1):
2137 backend = fsbackend(ui, repo.root)
2140 backend = fsbackend(ui, repo.root)
2138 with open(patchpath, 'rb') as fp:
2141 with open(patchpath, 'rb') as fp:
2139 changed = set()
2142 changed = set()
2140 for state, values in iterhunks(fp):
2143 for state, values in iterhunks(fp):
2141 if state == 'file':
2144 if state == 'file':
2142 afile, bfile, first_hunk, gp = values
2145 afile, bfile, first_hunk, gp = values
2143 if gp:
2146 if gp:
2144 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2147 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2145 if gp.oldpath:
2148 if gp.oldpath:
2146 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2149 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2147 else:
2150 else:
2148 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2151 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2149 '')
2152 '')
2150 changed.add(gp.path)
2153 changed.add(gp.path)
2151 if gp.op == 'RENAME':
2154 if gp.op == 'RENAME':
2152 changed.add(gp.oldpath)
2155 changed.add(gp.oldpath)
2153 elif state not in ('hunk', 'git'):
2156 elif state not in ('hunk', 'git'):
2154 raise error.Abort(_('unsupported parser state: %s') % state)
2157 raise error.Abort(_('unsupported parser state: %s') % state)
2155 return changed
2158 return changed
2156
2159
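# Usage sketch for changedfiles(): report which paths a patch would touch
# without applying it (rename sources included). The patch path is an
# illustrative assumption.
def _changedfilesexample(ui, repo, patchpath='fix.patch'):
    changed = changedfiles(ui, repo, patchpath, strip=1)
    for f in sorted(changed):
        ui.write("%s\n" % f)
    return changed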
2157 class GitDiffRequired(Exception):
2160 class GitDiffRequired(Exception):
2158 pass
2161 pass
2159
2162
2160 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2163 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2161 '''return diffopts with all features supported and parsed'''
2164 '''return diffopts with all features supported and parsed'''
2162 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2165 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2163 git=True, whitespace=True, formatchanging=True)
2166 git=True, whitespace=True, formatchanging=True)
2164
2167
2165 diffopts = diffallopts
2168 diffopts = diffallopts
2166
2169
2167 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2170 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2168 whitespace=False, formatchanging=False):
2171 whitespace=False, formatchanging=False):
2169 '''return diffopts with only opted-in features parsed
2172 '''return diffopts with only opted-in features parsed
2170
2173
2171 Features:
2174 Features:
2172 - git: git-style diffs
2175 - git: git-style diffs
2173 - whitespace: whitespace options like ignoreblanklines and ignorews
2176 - whitespace: whitespace options like ignoreblanklines and ignorews
2174 - formatchanging: options that will likely break or cause correctness issues
2177 - formatchanging: options that will likely break or cause correctness issues
2175 with most diff parsers
2178 with most diff parsers
2176 '''
2179 '''
2177 def get(key, name=None, getter=ui.configbool, forceplain=None):
2180 def get(key, name=None, getter=ui.configbool, forceplain=None):
2178 if opts:
2181 if opts:
2179 v = opts.get(key)
2182 v = opts.get(key)
2180 # diffopts flags are either None-default (which is passed
2183 # diffopts flags are either None-default (which is passed
2181 # through unchanged, so we can identify unset values), or
2184 # through unchanged, so we can identify unset values), or
2182 # some other falsey default (e.g. --unified, which defaults
2185 # some other falsey default (e.g. --unified, which defaults
2183 # to an empty string). We only want to override the config
2186 # to an empty string). We only want to override the config
2184 # entries from hgrc with command line values if they
2187 # entries from hgrc with command line values if they
2185 # appear to have been set, which means any truthy value or an
2188 # appear to have been set, which means any truthy value or an
2186 # explicit boolean (True or False).
2189 # explicit boolean (True or False).
2187 if v or isinstance(v, bool):
2190 if v or isinstance(v, bool):
2188 return v
2191 return v
2189 if forceplain is not None and ui.plain():
2192 if forceplain is not None and ui.plain():
2190 return forceplain
2193 return forceplain
2191 return getter(section, name or key, None, untrusted=untrusted)
2194 return getter(section, name or key, None, untrusted=untrusted)
2192
2195
2193 # core options, expected to be understood by every diff parser
2196 # core options, expected to be understood by every diff parser
2194 buildopts = {
2197 buildopts = {
2195 'nodates': get('nodates'),
2198 'nodates': get('nodates'),
2196 'showfunc': get('show_function', 'showfunc'),
2199 'showfunc': get('show_function', 'showfunc'),
2197 'context': get('unified', getter=ui.config),
2200 'context': get('unified', getter=ui.config),
2198 }
2201 }
2199
2202
2200 if git:
2203 if git:
2201 buildopts['git'] = get('git')
2204 buildopts['git'] = get('git')
2202
2205
2203 # since this is in the experimental section, we need to call
2206 # since this is in the experimental section, we need to call
2204 # ui.configbool directly
2207 # ui.configbool directly
2205 buildopts['showsimilarity'] = ui.configbool('experimental',
2208 buildopts['showsimilarity'] = ui.configbool('experimental',
2206 'extendedheader.similarity')
2209 'extendedheader.similarity')
2207
2210
2208 # need to inspect the ui object instead of using get() since we want to
2211 # need to inspect the ui object instead of using get() since we want to
2209 # test for an int
2212 # test for an int
2210 hconf = ui.config('experimental', 'extendedheader.index')
2213 hconf = ui.config('experimental', 'extendedheader.index')
2211 if hconf is not None:
2214 if hconf is not None:
2212 hlen = None
2215 hlen = None
2213 try:
2216 try:
2214 # the hash config could be an integer (for length of hash) or a
2217 # the hash config could be an integer (for length of hash) or a
2215 # word (e.g. short, full, none)
2218 # word (e.g. short, full, none)
2216 hlen = int(hconf)
2219 hlen = int(hconf)
2217 if hlen < 0 or hlen > 40:
2220 if hlen < 0 or hlen > 40:
2218 msg = _("invalid length for extendedheader.index: '%d'\n")
2221 msg = _("invalid length for extendedheader.index: '%d'\n")
2219 ui.warn(msg % hlen)
2222 ui.warn(msg % hlen)
2220 except ValueError:
2223 except ValueError:
2221 # default value
2224 # default value
2222 if hconf == 'short' or hconf == '':
2225 if hconf == 'short' or hconf == '':
2223 hlen = 12
2226 hlen = 12
2224 elif hconf == 'full':
2227 elif hconf == 'full':
2225 hlen = 40
2228 hlen = 40
2226 elif hconf != 'none':
2229 elif hconf != 'none':
2227 msg = _("invalid value for extendedheader.index: '%s'\n")
2230 msg = _("invalid value for extendedheader.index: '%s'\n")
2228 ui.warn(msg % hconf)
2231 ui.warn(msg % hconf)
2229 finally:
2232 finally:
2230 buildopts['index'] = hlen
2233 buildopts['index'] = hlen
2231
2234
2232 if whitespace:
2235 if whitespace:
2233 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2236 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2234 buildopts['ignorewsamount'] = get('ignore_space_change',
2237 buildopts['ignorewsamount'] = get('ignore_space_change',
2235 'ignorewsamount')
2238 'ignorewsamount')
2236 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2239 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2237 'ignoreblanklines')
2240 'ignoreblanklines')
2238 if formatchanging:
2241 if formatchanging:
2239 buildopts['text'] = opts and opts.get('text')
2242 buildopts['text'] = opts and opts.get('text')
2240 binary = None if opts is None else opts.get('binary')
2243 binary = None if opts is None else opts.get('binary')
2241 buildopts['nobinary'] = (not binary if binary is not None
2244 buildopts['nobinary'] = (not binary if binary is not None
2242 else get('nobinary', forceplain=False))
2245 else get('nobinary', forceplain=False))
2243 buildopts['noprefix'] = get('noprefix', forceplain=False)
2246 buildopts['noprefix'] = get('noprefix', forceplain=False)
2244
2247
2245 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2248 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2246
2249
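# Usage sketch for the diffopts builders. diffallopts() parses every feature
# group, while difffeatureopts() lets a command opt in selectively; the opts
# dicts below mimic command-line flags and are illustrative.
def _diffoptsexample(ui):
    # all feature groups: git, whitespace and format-changing options
    allopts = diffallopts(ui, opts={'git': True, 'show_function': True})
    # core + git options only; whitespace and format-changing settings are
    # left at their defaults rather than read from opts or config
    gitonly = difffeatureopts(ui, opts={'git': True}, git=True)
    return allopts, gitonly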
2247 def diff(repo, node1=None, node2=None, match=None, changes=None,
2250 def diff(repo, node1=None, node2=None, match=None, changes=None,
2248 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2251 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2249 '''yields diff of changes to files between two nodes, or node and
2252 '''yields diff of changes to files between two nodes, or node and
2250 working directory.
2253 working directory.
2251
2254
2252 if node1 is None, use first dirstate parent instead.
2255 if node1 is None, use first dirstate parent instead.
2253 if node2 is None, compare node1 with working directory.
2256 if node2 is None, compare node1 with working directory.
2254
2257
2255 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2258 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2256 every time some change cannot be represented with the current
2259 every time some change cannot be represented with the current
2257 patch format. Return False to upgrade to git patch format, True to
2260 patch format. Return False to upgrade to git patch format, True to
2258 accept the loss or raise an exception to abort the diff. It is
2261 accept the loss or raise an exception to abort the diff. It is
2259 called with the name of the current file being diffed as 'fn'. If set
2262 called with the name of the current file being diffed as 'fn'. If set
2260 to None, patches will always be upgraded to git format when
2263 to None, patches will always be upgraded to git format when
2261 necessary.
2264 necessary.
2262
2265
2263 prefix is a filename prefix that is prepended to all filenames on
2266 prefix is a filename prefix that is prepended to all filenames on
2264 display (used for subrepos).
2267 display (used for subrepos).
2265
2268
2266 relroot, if not empty, must be normalized with a trailing /. Any match
2269 relroot, if not empty, must be normalized with a trailing /. Any match
2267 patterns that fall outside it will be ignored.
2270 patterns that fall outside it will be ignored.
2268
2271
2269 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2272 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2270 information.'''
2273 information.'''
2271 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2274 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2272 changes=changes, opts=opts,
2275 changes=changes, opts=opts,
2273 losedatafn=losedatafn, prefix=prefix,
2276 losedatafn=losedatafn, prefix=prefix,
2274 relroot=relroot, copy=copy):
2277 relroot=relroot, copy=copy):
2275 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2278 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2276 if header and (text or len(header) > 1):
2279 if header and (text or len(header) > 1):
2277 yield '\n'.join(header) + '\n'
2280 yield '\n'.join(header) + '\n'
2278 if text:
2281 if text:
2279 yield text
2282 yield text
2280
2283
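# Usage sketch for diff(): stream a git-style diff between two changesets
# (or a changeset and the working directory) to the ui. The default revision
# arguments are illustrative; anything accepted by repo[...] works.
def _diffexample(ui, repo, rev1='.', rev2=None):
    diffopts = diffallopts(ui, opts={'git': True})
    node1 = repo[rev1].node()
    node2 = repo[rev2].node() if rev2 is not None else None
    for chunk in diff(repo, node1=node1, node2=node2, opts=diffopts):
        ui.write(chunk)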
2281 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2284 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2282 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2285 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2283 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2286 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2284 where `header` is a list of diff headers and `hunks` is an iterable of
2287 where `header` is a list of diff headers and `hunks` is an iterable of
2285 (`hunkrange`, `hunklines`) tuples.
2288 (`hunkrange`, `hunklines`) tuples.
2286
2289
2287 See diff() for the meaning of parameters.
2290 See diff() for the meaning of parameters.
2288 """
2291 """
2289
2292
2290 if opts is None:
2293 if opts is None:
2291 opts = mdiff.defaultopts
2294 opts = mdiff.defaultopts
2292
2295
2293 if not node1 and not node2:
2296 if not node1 and not node2:
2294 node1 = repo.dirstate.p1()
2297 node1 = repo.dirstate.p1()
2295
2298
2296 def lrugetfilectx():
2299 def lrugetfilectx():
2297 cache = {}
2300 cache = {}
2298 order = collections.deque()
2301 order = collections.deque()
2299 def getfilectx(f, ctx):
2302 def getfilectx(f, ctx):
2300 fctx = ctx.filectx(f, filelog=cache.get(f))
2303 fctx = ctx.filectx(f, filelog=cache.get(f))
2301 if f not in cache:
2304 if f not in cache:
2302 if len(cache) > 20:
2305 if len(cache) > 20:
2303 del cache[order.popleft()]
2306 del cache[order.popleft()]
2304 cache[f] = fctx.filelog()
2307 cache[f] = fctx.filelog()
2305 else:
2308 else:
2306 order.remove(f)
2309 order.remove(f)
2307 order.append(f)
2310 order.append(f)
2308 return fctx
2311 return fctx
2309 return getfilectx
2312 return getfilectx
2310 getfilectx = lrugetfilectx()
2313 getfilectx = lrugetfilectx()
2311
2314
2312 ctx1 = repo[node1]
2315 ctx1 = repo[node1]
2313 ctx2 = repo[node2]
2316 ctx2 = repo[node2]
2314
2317
2315 relfiltered = False
2318 relfiltered = False
2316 if relroot != '' and match.always():
2319 if relroot != '' and match.always():
2317 # as a special case, create a new matcher with just the relroot
2320 # as a special case, create a new matcher with just the relroot
2318 pats = [relroot]
2321 pats = [relroot]
2319 match = scmutil.match(ctx2, pats, default='path')
2322 match = scmutil.match(ctx2, pats, default='path')
2320 relfiltered = True
2323 relfiltered = True
2321
2324
2322 if not changes:
2325 if not changes:
2323 changes = repo.status(ctx1, ctx2, match=match)
2326 changes = repo.status(ctx1, ctx2, match=match)
2324 modified, added, removed = changes[:3]
2327 modified, added, removed = changes[:3]
2325
2328
2326 if not modified and not added and not removed:
2329 if not modified and not added and not removed:
2327 return []
2330 return []
2328
2331
2329 if repo.ui.debugflag:
2332 if repo.ui.debugflag:
2330 hexfunc = hex
2333 hexfunc = hex
2331 else:
2334 else:
2332 hexfunc = short
2335 hexfunc = short
2333 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2336 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2334
2337
2335 if copy is None:
2338 if copy is None:
2336 copy = {}
2339 copy = {}
2337 if opts.git or opts.upgrade:
2340 if opts.git or opts.upgrade:
2338 copy = copies.pathcopies(ctx1, ctx2, match=match)
2341 copy = copies.pathcopies(ctx1, ctx2, match=match)
2339
2342
2340 if relroot is not None:
2343 if relroot is not None:
2341 if not relfiltered:
2344 if not relfiltered:
2342 # XXX this would ideally be done in the matcher, but that is
2345 # XXX this would ideally be done in the matcher, but that is
2343 # generally meant to 'or' patterns, not 'and' them. In this case we
2346 # generally meant to 'or' patterns, not 'and' them. In this case we
2344 # need to 'and' all the patterns from the matcher with relroot.
2347 # need to 'and' all the patterns from the matcher with relroot.
2345 def filterrel(l):
2348 def filterrel(l):
2346 return [f for f in l if f.startswith(relroot)]
2349 return [f for f in l if f.startswith(relroot)]
2347 modified = filterrel(modified)
2350 modified = filterrel(modified)
2348 added = filterrel(added)
2351 added = filterrel(added)
2349 removed = filterrel(removed)
2352 removed = filterrel(removed)
2350 relfiltered = True
2353 relfiltered = True
2351 # filter out copies where either side isn't inside the relative root
2354 # filter out copies where either side isn't inside the relative root
2352 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2355 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2353 if dst.startswith(relroot)
2356 if dst.startswith(relroot)
2354 and src.startswith(relroot)))
2357 and src.startswith(relroot)))
2355
2358
2356 modifiedset = set(modified)
2359 modifiedset = set(modified)
2357 addedset = set(added)
2360 addedset = set(added)
2358 removedset = set(removed)
2361 removedset = set(removed)
2359 for f in modified:
2362 for f in modified:
2360 if f not in ctx1:
2363 if f not in ctx1:
2361 # Fix up added, since merged-in additions appear as
2364 # Fix up added, since merged-in additions appear as
2362 # modifications during merges
2365 # modifications during merges
2363 modifiedset.remove(f)
2366 modifiedset.remove(f)
2364 addedset.add(f)
2367 addedset.add(f)
2365 for f in removed:
2368 for f in removed:
2366 if f not in ctx1:
2369 if f not in ctx1:
2367 # Merged-in additions that are then removed are reported as removed.
2370 # Merged-in additions that are then removed are reported as removed.
2368 # They are not in ctx1, so we don't want to show them in the diff.
2371 # They are not in ctx1, so we don't want to show them in the diff.
2369 removedset.remove(f)
2372 removedset.remove(f)
2370 modified = sorted(modifiedset)
2373 modified = sorted(modifiedset)
2371 added = sorted(addedset)
2374 added = sorted(addedset)
2372 removed = sorted(removedset)
2375 removed = sorted(removedset)
2373 for dst, src in copy.items():
2376 for dst, src in copy.items():
2374 if src not in ctx1:
2377 if src not in ctx1:
2375 # Files merged in during a merge and then copied/renamed are
2378 # Files merged in during a merge and then copied/renamed are
2376 # reported as copies. We want to show them in the diff as additions.
2379 # reported as copies. We want to show them in the diff as additions.
2377 del copy[dst]
2380 del copy[dst]
2378
2381
2379 def difffn(opts, losedata):
2382 def difffn(opts, losedata):
2380 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2383 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2381 copy, getfilectx, opts, losedata, prefix, relroot)
2384 copy, getfilectx, opts, losedata, prefix, relroot)
2382 if opts.upgrade and not opts.git:
2385 if opts.upgrade and not opts.git:
2383 try:
2386 try:
2384 def losedata(fn):
2387 def losedata(fn):
2385 if not losedatafn or not losedatafn(fn=fn):
2388 if not losedatafn or not losedatafn(fn=fn):
2386 raise GitDiffRequired
2389 raise GitDiffRequired
2387 # Buffer the whole output until we are sure it can be generated
2390 # Buffer the whole output until we are sure it can be generated
2388 return list(difffn(opts.copy(git=False), losedata))
2391 return list(difffn(opts.copy(git=False), losedata))
2389 except GitDiffRequired:
2392 except GitDiffRequired:
2390 return difffn(opts.copy(git=True), None)
2393 return difffn(opts.copy(git=True), None)
2391 else:
2394 else:
2392 return difffn(opts, None)
2395 return difffn(opts, None)
2393
2396
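# Usage sketch for diffhunks(): tally added/removed lines per file without
# rendering the full text. hunklines carry the usual ' ', '+' and '-'
# prefixes; header[0] is the "diff ..." line when one is emitted.
def _diffhunksexample(repo, node1, node2, opts):
    counts = []
    for header, hunks in diffhunks(repo, node1=node1, node2=node2, opts=opts):
        adds = removes = 0
        for hunkrange, hunklines in hunks:
            for line in hunklines:
                if line.startswith('+'):
                    adds += 1
                elif line.startswith('-'):
                    removes += 1
        counts.append((header[0] if header else '', adds, removes))
    return counts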
2394 def difflabel(func, *args, **kw):
2397 def difflabel(func, *args, **kw):
2395 '''yields 2-tuples of (output, label) based on the output of func()'''
2398 '''yields 2-tuples of (output, label) based on the output of func()'''
2396 headprefixes = [('diff', 'diff.diffline'),
2399 headprefixes = [('diff', 'diff.diffline'),
2397 ('copy', 'diff.extended'),
2400 ('copy', 'diff.extended'),
2398 ('rename', 'diff.extended'),
2401 ('rename', 'diff.extended'),
2399 ('old', 'diff.extended'),
2402 ('old', 'diff.extended'),
2400 ('new', 'diff.extended'),
2403 ('new', 'diff.extended'),
2401 ('deleted', 'diff.extended'),
2404 ('deleted', 'diff.extended'),
2402 ('index', 'diff.extended'),
2405 ('index', 'diff.extended'),
2403 ('similarity', 'diff.extended'),
2406 ('similarity', 'diff.extended'),
2404 ('---', 'diff.file_a'),
2407 ('---', 'diff.file_a'),
2405 ('+++', 'diff.file_b')]
2408 ('+++', 'diff.file_b')]
2406 textprefixes = [('@', 'diff.hunk'),
2409 textprefixes = [('@', 'diff.hunk'),
2407 ('-', 'diff.deleted'),
2410 ('-', 'diff.deleted'),
2408 ('+', 'diff.inserted')]
2411 ('+', 'diff.inserted')]
2409 head = False
2412 head = False
2410 for chunk in func(*args, **kw):
2413 for chunk in func(*args, **kw):
2411 lines = chunk.split('\n')
2414 lines = chunk.split('\n')
2412 for i, line in enumerate(lines):
2415 for i, line in enumerate(lines):
2413 if i != 0:
2416 if i != 0:
2414 yield ('\n', '')
2417 yield ('\n', '')
2415 if head:
2418 if head:
2416 if line.startswith('@'):
2419 if line.startswith('@'):
2417 head = False
2420 head = False
2418 else:
2421 else:
2419 if line and line[0] not in ' +-@\\':
2422 if line and line[0] not in ' +-@\\':
2420 head = True
2423 head = True
2421 stripline = line
2424 stripline = line
2422 diffline = False
2425 diffline = False
2423 if not head and line and line[0] in '+-':
2426 if not head and line and line[0] in '+-':
2424 # highlight tabs and trailing whitespace, but only in
2427 # highlight tabs and trailing whitespace, but only in
2425 # changed lines
2428 # changed lines
2426 stripline = line.rstrip()
2429 stripline = line.rstrip()
2427 diffline = True
2430 diffline = True
2428
2431
2429 prefixes = textprefixes
2432 prefixes = textprefixes
2430 if head:
2433 if head:
2431 prefixes = headprefixes
2434 prefixes = headprefixes
2432 for prefix, label in prefixes:
2435 for prefix, label in prefixes:
2433 if stripline.startswith(prefix):
2436 if stripline.startswith(prefix):
2434 if diffline:
2437 if diffline:
2435 for token in tabsplitter.findall(stripline):
2438 for token in tabsplitter.findall(stripline):
2436 if '\t' == token[0]:
2439 if '\t' == token[0]:
2437 yield (token, 'diff.tab')
2440 yield (token, 'diff.tab')
2438 else:
2441 else:
2439 yield (token, label)
2442 yield (token, label)
2440 else:
2443 else:
2441 yield (stripline, label)
2444 yield (stripline, label)
2442 break
2445 break
2443 else:
2446 else:
2444 yield (line, '')
2447 yield (line, '')
2445 if line != stripline:
2448 if line != stripline:
2446 yield (line[len(stripline):], 'diff.trailingwhitespace')
2449 yield (line[len(stripline):], 'diff.trailingwhitespace')
2447
2450
2448 def diffui(*args, **kw):
2451 def diffui(*args, **kw):
2449 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2452 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2450 return difflabel(diff, *args, **kw)
2453 return difflabel(diff, *args, **kw)
2451
2454
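# Usage sketch for diffui(): the (output, label) pairs map directly onto
# ui.write()'s label keyword, which is how colorized diff output is emitted.
def _diffuiexample(ui, repo, node1=None, node2=None):
    diffopts = diffallopts(ui, opts={'git': True})
    for chunk, label in diffui(repo, node1=node1, node2=node2, opts=diffopts):
        ui.write(chunk, label=label)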
2452 def _filepairs(modified, added, removed, copy, opts):
2455 def _filepairs(modified, added, removed, copy, opts):
2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2456 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2454 before and f2 is the name after. For added files, f1 will be None,
2457 before and f2 is the name after. For added files, f1 will be None,
2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2458 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2456 or 'rename' (the latter two only if opts.git is set).'''
2459 or 'rename' (the latter two only if opts.git is set).'''
2457 gone = set()
2460 gone = set()
2458
2461
2459 copyto = dict([(v, k) for k, v in copy.items()])
2462 copyto = dict([(v, k) for k, v in copy.items()])
2460
2463
2461 addedset, removedset = set(added), set(removed)
2464 addedset, removedset = set(added), set(removed)
2462
2465
2463 for f in sorted(modified + added + removed):
2466 for f in sorted(modified + added + removed):
2464 copyop = None
2467 copyop = None
2465 f1, f2 = f, f
2468 f1, f2 = f, f
2466 if f in addedset:
2469 if f in addedset:
2467 f1 = None
2470 f1 = None
2468 if f in copy:
2471 if f in copy:
2469 if opts.git:
2472 if opts.git:
2470 f1 = copy[f]
2473 f1 = copy[f]
2471 if f1 in removedset and f1 not in gone:
2474 if f1 in removedset and f1 not in gone:
2472 copyop = 'rename'
2475 copyop = 'rename'
2473 gone.add(f1)
2476 gone.add(f1)
2474 else:
2477 else:
2475 copyop = 'copy'
2478 copyop = 'copy'
2476 elif f in removedset:
2479 elif f in removedset:
2477 f2 = None
2480 f2 = None
2478 if opts.git:
2481 if opts.git:
2479 # have we already reported a copy above?
2482 # have we already reported a copy above?
2480 if (f in copyto and copyto[f] in addedset
2483 if (f in copyto and copyto[f] in addedset
2481 and copy[copyto[f]] == f):
2484 and copy[copyto[f]] == f):
2482 continue
2485 continue
2483 yield f1, f2, copyop
2486 yield f1, f2, copyop
2484
2487
2485 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2488 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2486 copy, getfilectx, opts, losedatafn, prefix, relroot):
2489 copy, getfilectx, opts, losedatafn, prefix, relroot):
2487 '''given input data, generate a diff and yield it in blocks
2490 '''given input data, generate a diff and yield it in blocks
2488
2491
2489 If generating a diff would lose data like flags or binary data and
2492 If generating a diff would lose data like flags or binary data and
2490 losedatafn is not None, it will be called.
2493 losedatafn is not None, it will be called.
2491
2494
2492 relroot is removed and prefix is added to every path in the diff output.
2495 relroot is removed and prefix is added to every path in the diff output.
2493
2496
2494 If relroot is not empty, this function expects every path in modified,
2497 If relroot is not empty, this function expects every path in modified,
2495 added, removed and copy to start with it.'''
2498 added, removed and copy to start with it.'''
2496
2499
2497 def gitindex(text):
2500 def gitindex(text):
2498 if not text:
2501 if not text:
2499 text = ""
2502 text = ""
2500 l = len(text)
2503 l = len(text)
2501 s = hashlib.sha1('blob %d\0' % l)
2504 s = hashlib.sha1('blob %d\0' % l)
2502 s.update(text)
2505 s.update(text)
2503 return s.hexdigest()
2506 return s.hexdigest()
2504
2507
2505 if opts.noprefix:
2508 if opts.noprefix:
2506 aprefix = bprefix = ''
2509 aprefix = bprefix = ''
2507 else:
2510 else:
2508 aprefix = 'a/'
2511 aprefix = 'a/'
2509 bprefix = 'b/'
2512 bprefix = 'b/'
2510
2513
2511 def diffline(f, revs):
2514 def diffline(f, revs):
2512 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2515 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2513 return 'diff %s %s' % (revinfo, f)
2516 return 'diff %s %s' % (revinfo, f)
2514
2517
2515 def isempty(fctx):
2518 def isempty(fctx):
2516 return fctx is None or fctx.size() == 0
2519 return fctx is None or fctx.size() == 0
2517
2520
2518 date1 = util.datestr(ctx1.date())
2521 date1 = util.datestr(ctx1.date())
2519 date2 = util.datestr(ctx2.date())
2522 date2 = util.datestr(ctx2.date())
2520
2523
2521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2524 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2522
2525
2523 if relroot != '' and (repo.ui.configbool('devel', 'all')
2526 if relroot != '' and (repo.ui.configbool('devel', 'all')
2524 or repo.ui.configbool('devel', 'check-relroot')):
2527 or repo.ui.configbool('devel', 'check-relroot')):
2525 for f in modified + added + removed + copy.keys() + copy.values():
2528 for f in modified + added + removed + copy.keys() + copy.values():
2526 if f is not None and not f.startswith(relroot):
2529 if f is not None and not f.startswith(relroot):
2527 raise AssertionError(
2530 raise AssertionError(
2528 "file %s doesn't start with relroot %s" % (f, relroot))
2531 "file %s doesn't start with relroot %s" % (f, relroot))
2529
2532
2530 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2533 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2531 content1 = None
2534 content1 = None
2532 content2 = None
2535 content2 = None
2533 fctx1 = None
2536 fctx1 = None
2534 fctx2 = None
2537 fctx2 = None
2535 flag1 = None
2538 flag1 = None
2536 flag2 = None
2539 flag2 = None
2537 if f1:
2540 if f1:
2538 fctx1 = getfilectx(f1, ctx1)
2541 fctx1 = getfilectx(f1, ctx1)
2539 if opts.git or losedatafn:
2542 if opts.git or losedatafn:
2540 flag1 = ctx1.flags(f1)
2543 flag1 = ctx1.flags(f1)
2541 if f2:
2544 if f2:
2542 fctx2 = getfilectx(f2, ctx2)
2545 fctx2 = getfilectx(f2, ctx2)
2543 if opts.git or losedatafn:
2546 if opts.git or losedatafn:
2544 flag2 = ctx2.flags(f2)
2547 flag2 = ctx2.flags(f2)
2545 # if binary is True, output "summary" or "base85", but not "text diff"
2548 # if binary is True, output "summary" or "base85", but not "text diff"
2546 binary = not opts.text and any(f.isbinary()
2549 binary = not opts.text and any(f.isbinary()
2547 for f in [fctx1, fctx2] if f is not None)
2550 for f in [fctx1, fctx2] if f is not None)
2548
2551
2549 if losedatafn and not opts.git:
2552 if losedatafn and not opts.git:
2550 if (binary or
2553 if (binary or
2551 # copy/rename
2554 # copy/rename
2552 f2 in copy or
2555 f2 in copy or
2553 # empty file creation
2556 # empty file creation
2554 (not f1 and isempty(fctx2)) or
2557 (not f1 and isempty(fctx2)) or
2555 # empty file deletion
2558 # empty file deletion
2556 (isempty(fctx1) and not f2) or
2559 (isempty(fctx1) and not f2) or
2557 # create with flags
2560 # create with flags
2558 (not f1 and flag2) or
2561 (not f1 and flag2) or
2559 # change flags
2562 # change flags
2560 (f1 and f2 and flag1 != flag2)):
2563 (f1 and f2 and flag1 != flag2)):
2561 losedatafn(f2 or f1)
2564 losedatafn(f2 or f1)
2562
2565
2563 path1 = f1 or f2
2566 path1 = f1 or f2
2564 path2 = f2 or f1
2567 path2 = f2 or f1
2565 path1 = posixpath.join(prefix, path1[len(relroot):])
2568 path1 = posixpath.join(prefix, path1[len(relroot):])
2566 path2 = posixpath.join(prefix, path2[len(relroot):])
2569 path2 = posixpath.join(prefix, path2[len(relroot):])
2567 header = []
2570 header = []
2568 if opts.git:
2571 if opts.git:
2569 header.append('diff --git %s%s %s%s' %
2572 header.append('diff --git %s%s %s%s' %
2570 (aprefix, path1, bprefix, path2))
2573 (aprefix, path1, bprefix, path2))
2571 if not f1: # added
2574 if not f1: # added
2572 header.append('new file mode %s' % gitmode[flag2])
2575 header.append('new file mode %s' % gitmode[flag2])
2573 elif not f2: # removed
2576 elif not f2: # removed
2574 header.append('deleted file mode %s' % gitmode[flag1])
2577 header.append('deleted file mode %s' % gitmode[flag1])
2575 else: # modified/copied/renamed
2578 else: # modified/copied/renamed
2576 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2579 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2577 if mode1 != mode2:
2580 if mode1 != mode2:
2578 header.append('old mode %s' % mode1)
2581 header.append('old mode %s' % mode1)
2579 header.append('new mode %s' % mode2)
2582 header.append('new mode %s' % mode2)
2580 if copyop is not None:
2583 if copyop is not None:
2581 if opts.showsimilarity:
2584 if opts.showsimilarity:
2582 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2585 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2583 header.append('similarity index %d%%' % sim)
2586 header.append('similarity index %d%%' % sim)
2584 header.append('%s from %s' % (copyop, path1))
2587 header.append('%s from %s' % (copyop, path1))
2585 header.append('%s to %s' % (copyop, path2))
2588 header.append('%s to %s' % (copyop, path2))
2586 elif revs and not repo.ui.quiet:
2589 elif revs and not repo.ui.quiet:
2587 header.append(diffline(path1, revs))
2590 header.append(diffline(path1, revs))
2588
2591
2589 # fctx.is | diffopts | what to | is fctx.data()
2592 # fctx.is | diffopts | what to | is fctx.data()
2590 # binary() | text nobinary git index | output? | outputted?
2593 # binary() | text nobinary git index | output? | outputted?
2591 # ------------------------------------|----------------------------
2594 # ------------------------------------|----------------------------
2592 # yes | no no no * | summary | no
2595 # yes | no no no * | summary | no
2593 # yes | no no yes * | base85 | yes
2596 # yes | no no yes * | base85 | yes
2594 # yes | no yes no * | summary | no
2597 # yes | no yes no * | summary | no
2595 # yes | no yes yes 0 | summary | no
2598 # yes | no yes yes 0 | summary | no
2596 # yes | no yes yes >0 | summary | semi [1]
2599 # yes | no yes yes >0 | summary | semi [1]
2597 # yes | yes * * * | text diff | yes
2600 # yes | yes * * * | text diff | yes
2598 # no | * * * * | text diff | yes
2601 # no | * * * * | text diff | yes
2599 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2602 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2600 if binary and (not opts.git or (opts.git and opts.nobinary and not
2603 if binary and (not opts.git or (opts.git and opts.nobinary and not
2601 opts.index)):
2604 opts.index)):
2602 # fast path: no binary content will be displayed, content1 and
2605 # fast path: no binary content will be displayed, content1 and
2603 # content2 are only used for equivalent test. cmp() could have a
2606 # content2 are only used for equivalent test. cmp() could have a
2604 # fast path.
2607 # fast path.
2605 if fctx1 is not None:
2608 if fctx1 is not None:
2606 content1 = b'\0'
2609 content1 = b'\0'
2607 if fctx2 is not None:
2610 if fctx2 is not None:
2608 if fctx1 is not None and not fctx1.cmp(fctx2):
2611 if fctx1 is not None and not fctx1.cmp(fctx2):
2609 content2 = b'\0' # not different
2612 content2 = b'\0' # not different
2610 else:
2613 else:
2611 content2 = b'\0\0'
2614 content2 = b'\0\0'
2612 else:
2615 else:
2613 # normal path: load contents
2616 # normal path: load contents
2614 if fctx1 is not None:
2617 if fctx1 is not None:
2615 content1 = fctx1.data()
2618 content1 = fctx1.data()
2616 if fctx2 is not None:
2619 if fctx2 is not None:
2617 content2 = fctx2.data()
2620 content2 = fctx2.data()
2618
2621
2619 if binary and opts.git and not opts.nobinary:
2622 if binary and opts.git and not opts.nobinary:
2620 text = mdiff.b85diff(content1, content2)
2623 text = mdiff.b85diff(content1, content2)
2621 if text:
2624 if text:
2622 header.append('index %s..%s' %
2625 header.append('index %s..%s' %
2623 (gitindex(content1), gitindex(content2)))
2626 (gitindex(content1), gitindex(content2)))
2624 hunks = (None, [text]),
2627 hunks = (None, [text]),
2625 else:
2628 else:
2626 if opts.git and opts.index > 0:
2629 if opts.git and opts.index > 0:
2627 flag = flag1
2630 flag = flag1
2628 if flag is None:
2631 if flag is None:
2629 flag = flag2
2632 flag = flag2
2630 header.append('index %s..%s %s' %
2633 header.append('index %s..%s %s' %
2631 (gitindex(content1)[0:opts.index],
2634 (gitindex(content1)[0:opts.index],
2632 gitindex(content2)[0:opts.index],
2635 gitindex(content2)[0:opts.index],
2633 gitmode[flag]))
2636 gitmode[flag]))
2634
2637
2635 uheaders, hunks = mdiff.unidiff(content1, date1,
2638 uheaders, hunks = mdiff.unidiff(content1, date1,
2636 content2, date2,
2639 content2, date2,
2637 path1, path2, opts=opts)
2640 path1, path2, opts=opts)
2638 header.extend(uheaders)
2641 header.extend(uheaders)
2639 yield header, hunks
2642 yield header, hunks
2640
2643
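# Standalone sketch of the hashing used by gitindex() inside trydiff() above:
# the "index <hash>..<hash>" header reuses git's blob object id, the SHA-1 of
# "blob <size>\0" followed by the contents (hashlib is imported at the top of
# this module).
def _gitblobid(data):
    s = hashlib.sha1('blob %d\0' % len(data))
    s.update(data)
    return s.hexdigest()

# e.g. _gitblobid('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391', the
# well-known id of git's empty blob.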
2641 def diffstatsum(stats):
2644 def diffstatsum(stats):
2642 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2645 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2643 for f, a, r, b in stats:
2646 for f, a, r, b in stats:
2644 maxfile = max(maxfile, encoding.colwidth(f))
2647 maxfile = max(maxfile, encoding.colwidth(f))
2645 maxtotal = max(maxtotal, a + r)
2648 maxtotal = max(maxtotal, a + r)
2646 addtotal += a
2649 addtotal += a
2647 removetotal += r
2650 removetotal += r
2648 binary = binary or b
2651 binary = binary or b
2649
2652
2650 return maxfile, maxtotal, addtotal, removetotal, binary
2653 return maxfile, maxtotal, addtotal, removetotal, binary
2651
2654
2652 def diffstatdata(lines):
2655 def diffstatdata(lines):
2653 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2656 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2654
2657
2655 results = []
2658 results = []
2656 filename, adds, removes, isbinary = None, 0, 0, False
2659 filename, adds, removes, isbinary = None, 0, 0, False
2657
2660
2658 def addresult():
2661 def addresult():
2659 if filename:
2662 if filename:
2660 results.append((filename, adds, removes, isbinary))
2663 results.append((filename, adds, removes, isbinary))
2661
2664
2662 # inheader is used to track if a line is in the
2665 # inheader is used to track if a line is in the
2663 # header portion of the diff. This helps properly account
2666 # header portion of the diff. This helps properly account
2664 # for lines that start with '--' or '++'
2667 # for lines that start with '--' or '++'
2665 inheader = False
2668 inheader = False
2666
2669
2667 for line in lines:
2670 for line in lines:
2668 if line.startswith('diff'):
2671 if line.startswith('diff'):
2669 addresult()
2672 addresult()
2670 # starting a new file diff
2673 # starting a new file diff
2671 # set numbers to 0 and reset inheader
2674 # set numbers to 0 and reset inheader
2672 inheader = True
2675 inheader = True
2673 adds, removes, isbinary = 0, 0, False
2676 adds, removes, isbinary = 0, 0, False
2674 if line.startswith('diff --git a/'):
2677 if line.startswith('diff --git a/'):
2675 filename = gitre.search(line).group(2)
2678 filename = gitre.search(line).group(2)
2676 elif line.startswith('diff -r'):
2679 elif line.startswith('diff -r'):
2677 # format: "diff -r ... -r ... filename"
2680 # format: "diff -r ... -r ... filename"
2678 filename = diffre.search(line).group(1)
2681 filename = diffre.search(line).group(1)
2679 elif line.startswith('@@'):
2682 elif line.startswith('@@'):
2680 inheader = False
2683 inheader = False
2681 elif line.startswith('+') and not inheader:
2684 elif line.startswith('+') and not inheader:
2682 adds += 1
2685 adds += 1
2683 elif line.startswith('-') and not inheader:
2686 elif line.startswith('-') and not inheader:
2684 removes += 1
2687 removes += 1
2685 elif (line.startswith('GIT binary patch') or
2688 elif (line.startswith('GIT binary patch') or
2686 line.startswith('Binary file')):
2689 line.startswith('Binary file')):
2687 isbinary = True
2690 isbinary = True
2688 addresult()
2691 addresult()
2689 return results
2692 return results
2690
2693
2691 def diffstat(lines, width=80):
2694 def diffstat(lines, width=80):
2692 output = []
2695 output = []
2693 stats = diffstatdata(lines)
2696 stats = diffstatdata(lines)
2694 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2697 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2695
2698
2696 countwidth = len(str(maxtotal))
2699 countwidth = len(str(maxtotal))
2697 if hasbinary and countwidth < 3:
2700 if hasbinary and countwidth < 3:
2698 countwidth = 3
2701 countwidth = 3
2699 graphwidth = width - countwidth - maxname - 6
2702 graphwidth = width - countwidth - maxname - 6
2700 if graphwidth < 10:
2703 if graphwidth < 10:
2701 graphwidth = 10
2704 graphwidth = 10
2702
2705
2703 def scale(i):
2706 def scale(i):
2704 if maxtotal <= graphwidth:
2707 if maxtotal <= graphwidth:
2705 return i
2708 return i
2706 # If diffstat runs out of room it doesn't print anything,
2709 # If diffstat runs out of room it doesn't print anything,
2707 # which isn't very useful, so always print at least one + or -
2710 # which isn't very useful, so always print at least one + or -
2708 # if there were at least some changes.
2711 # if there were at least some changes.
2709 return max(i * graphwidth // maxtotal, int(bool(i)))
2712 return max(i * graphwidth // maxtotal, int(bool(i)))
2710
2713
2711 for filename, adds, removes, isbinary in stats:
2714 for filename, adds, removes, isbinary in stats:
2712 if isbinary:
2715 if isbinary:
2713 count = 'Bin'
2716 count = 'Bin'
2714 else:
2717 else:
2715 count = '%d' % (adds + removes)
2718 count = '%d' % (adds + removes)
2716 pluses = '+' * scale(adds)
2719 pluses = '+' * scale(adds)
2717 minuses = '-' * scale(removes)
2720 minuses = '-' * scale(removes)
2718 output.append(' %s%s | %*s %s%s\n' %
2721 output.append(' %s%s | %*s %s%s\n' %
2719 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2722 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2720 countwidth, count, pluses, minuses))
2723 countwidth, count, pluses, minuses))
2721
2724
2722 if stats:
2725 if stats:
2723 output.append(_(' %d files changed, %d insertions(+), '
2726 output.append(_(' %d files changed, %d insertions(+), '
2724 '%d deletions(-)\n')
2727 '%d deletions(-)\n')
2725 % (len(stats), totaladds, totalremoves))
2728 % (len(stats), totaladds, totalremoves))
2726
2729
2727 return ''.join(output)
2730 return ''.join(output)
2728
2731
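# Usage sketch for diffstat(): it expects individual diff output lines, while
# diff() yields multi-line chunks, so split the chunks first. Revision
# defaults are illustrative.
def _diffstatexample(ui, repo, node1=None, node2=None):
    lines = []
    for chunk in diff(repo, node1=node1, node2=node2, opts=diffallopts(ui)):
        lines.extend(chunk.splitlines(True))
    ui.write(diffstat(lines, width=80))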
2729 def diffstatui(*args, **kw):
2732 def diffstatui(*args, **kw):
2730 '''like diffstat(), but yields 2-tuples of (output, label) for
2733 '''like diffstat(), but yields 2-tuples of (output, label) for
2731 ui.write()
2734 ui.write()
2732 '''
2735 '''
2733
2736
2734 for line in diffstat(*args, **kw).splitlines():
2737 for line in diffstat(*args, **kw).splitlines():
2735 if line and line[-1] in '+-':
2738 if line and line[-1] in '+-':
2736 name, graph = line.rsplit(' ', 1)
2739 name, graph = line.rsplit(' ', 1)
2737 yield (name + ' ', '')
2740 yield (name + ' ', '')
2738 m = re.search(br'\++', graph)
2741 m = re.search(br'\++', graph)
2739 if m:
2742 if m:
2740 yield (m.group(0), 'diffstat.inserted')
2743 yield (m.group(0), 'diffstat.inserted')
2741 m = re.search(br'-+', graph)
2744 m = re.search(br'-+', graph)
2742 if m:
2745 if m:
2743 yield (m.group(0), 'diffstat.deleted')
2746 yield (m.group(0), 'diffstat.deleted')
2744 else:
2747 else:
2745 yield (line, '')
2748 yield (line, '')
2746 yield ('\n', '')
2749 yield ('\n', '')