##// END OF EJS Templates
patch: remove unused fsbackend._join()...
Martin von Zweigbergk -
r33157:5ebf39ae default
parent child Browse files
Show More
@@ -1,2749 +1,2746
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
43 diffhelpers = policy.importmod(r'diffhelpers')
43 diffhelpers = policy.importmod(r'diffhelpers')
44 stringio = util.stringio
44 stringio = util.stringio
45
45
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
49 class PatchError(Exception):
49 class PatchError(Exception):
50 pass
50 pass
51
51
52
52
53 # public functions
53 # public functions
54
54
55 def split(stream):
55 def split(stream):
56 '''return an iterator of individual patches from a stream'''
56 '''return an iterator of individual patches from a stream'''
57 def isheader(line, inheader):
57 def isheader(line, inheader):
58 if inheader and line[0] in (' ', '\t'):
58 if inheader and line[0] in (' ', '\t'):
59 # continuation
59 # continuation
60 return True
60 return True
61 if line[0] in (' ', '-', '+'):
61 if line[0] in (' ', '-', '+'):
62 # diff line - don't check for header pattern in there
62 # diff line - don't check for header pattern in there
63 return False
63 return False
64 l = line.split(': ', 1)
64 l = line.split(': ', 1)
65 return len(l) == 2 and ' ' not in l[0]
65 return len(l) == 2 and ' ' not in l[0]
66
66
67 def chunk(lines):
67 def chunk(lines):
68 return stringio(''.join(lines))
68 return stringio(''.join(lines))
69
69
70 def hgsplit(stream, cur):
70 def hgsplit(stream, cur):
71 inheader = True
71 inheader = True
72
72
73 for line in stream:
73 for line in stream:
74 if not line.strip():
74 if not line.strip():
75 inheader = False
75 inheader = False
76 if not inheader and line.startswith('# HG changeset patch'):
76 if not inheader and line.startswith('# HG changeset patch'):
77 yield chunk(cur)
77 yield chunk(cur)
78 cur = []
78 cur = []
79 inheader = True
79 inheader = True
80
80
81 cur.append(line)
81 cur.append(line)
82
82
83 if cur:
83 if cur:
84 yield chunk(cur)
84 yield chunk(cur)
85
85
86 def mboxsplit(stream, cur):
86 def mboxsplit(stream, cur):
87 for line in stream:
87 for line in stream:
88 if line.startswith('From '):
88 if line.startswith('From '):
89 for c in split(chunk(cur[1:])):
89 for c in split(chunk(cur[1:])):
90 yield c
90 yield c
91 cur = []
91 cur = []
92
92
93 cur.append(line)
93 cur.append(line)
94
94
95 if cur:
95 if cur:
96 for c in split(chunk(cur[1:])):
96 for c in split(chunk(cur[1:])):
97 yield c
97 yield c
98
98
99 def mimesplit(stream, cur):
99 def mimesplit(stream, cur):
100 def msgfp(m):
100 def msgfp(m):
101 fp = stringio()
101 fp = stringio()
102 g = email.Generator.Generator(fp, mangle_from_=False)
102 g = email.Generator.Generator(fp, mangle_from_=False)
103 g.flatten(m)
103 g.flatten(m)
104 fp.seek(0)
104 fp.seek(0)
105 return fp
105 return fp
106
106
107 for line in stream:
107 for line in stream:
108 cur.append(line)
108 cur.append(line)
109 c = chunk(cur)
109 c = chunk(cur)
110
110
111 m = email.Parser.Parser().parse(c)
111 m = email.Parser.Parser().parse(c)
112 if not m.is_multipart():
112 if not m.is_multipart():
113 yield msgfp(m)
113 yield msgfp(m)
114 else:
114 else:
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
116 for part in m.walk():
116 for part in m.walk():
117 ct = part.get_content_type()
117 ct = part.get_content_type()
118 if ct not in ok_types:
118 if ct not in ok_types:
119 continue
119 continue
120 yield msgfp(part)
120 yield msgfp(part)
121
121
122 def headersplit(stream, cur):
122 def headersplit(stream, cur):
123 inheader = False
123 inheader = False
124
124
125 for line in stream:
125 for line in stream:
126 if not inheader and isheader(line, inheader):
126 if not inheader and isheader(line, inheader):
127 yield chunk(cur)
127 yield chunk(cur)
128 cur = []
128 cur = []
129 inheader = True
129 inheader = True
130 if inheader and not isheader(line, inheader):
130 if inheader and not isheader(line, inheader):
131 inheader = False
131 inheader = False
132
132
133 cur.append(line)
133 cur.append(line)
134
134
135 if cur:
135 if cur:
136 yield chunk(cur)
136 yield chunk(cur)
137
137
138 def remainder(cur):
138 def remainder(cur):
139 yield chunk(cur)
139 yield chunk(cur)
140
140
141 class fiter(object):
141 class fiter(object):
142 def __init__(self, fp):
142 def __init__(self, fp):
143 self.fp = fp
143 self.fp = fp
144
144
145 def __iter__(self):
145 def __iter__(self):
146 return self
146 return self
147
147
148 def next(self):
148 def next(self):
149 l = self.fp.readline()
149 l = self.fp.readline()
150 if not l:
150 if not l:
151 raise StopIteration
151 raise StopIteration
152 return l
152 return l
153
153
154 inheader = False
154 inheader = False
155 cur = []
155 cur = []
156
156
157 mimeheaders = ['content-type']
157 mimeheaders = ['content-type']
158
158
159 if not util.safehasattr(stream, 'next'):
159 if not util.safehasattr(stream, 'next'):
160 # http responses, for example, have readline but not next
160 # http responses, for example, have readline but not next
161 stream = fiter(stream)
161 stream = fiter(stream)
162
162
163 for line in stream:
163 for line in stream:
164 cur.append(line)
164 cur.append(line)
165 if line.startswith('# HG changeset patch'):
165 if line.startswith('# HG changeset patch'):
166 return hgsplit(stream, cur)
166 return hgsplit(stream, cur)
167 elif line.startswith('From '):
167 elif line.startswith('From '):
168 return mboxsplit(stream, cur)
168 return mboxsplit(stream, cur)
169 elif isheader(line, inheader):
169 elif isheader(line, inheader):
170 inheader = True
170 inheader = True
171 if line.split(':', 1)[0].lower() in mimeheaders:
171 if line.split(':', 1)[0].lower() in mimeheaders:
172 # let email parser handle this
172 # let email parser handle this
173 return mimesplit(stream, cur)
173 return mimesplit(stream, cur)
174 elif line.startswith('--- ') and inheader:
174 elif line.startswith('--- ') and inheader:
175 # No evil headers seen by diff start, split by hand
175 # No evil headers seen by diff start, split by hand
176 return headersplit(stream, cur)
176 return headersplit(stream, cur)
177 # Not enough info, keep reading
177 # Not enough info, keep reading
178
178
179 # if we are here, we have a very plain patch
179 # if we are here, we have a very plain patch
180 return remainder(cur)
180 return remainder(cur)
181
181
182 ## Some facility for extensible patch parsing:
182 ## Some facility for extensible patch parsing:
183 # list of pairs ("header to match", "data key")
183 # list of pairs ("header to match", "data key")
184 patchheadermap = [('Date', 'date'),
184 patchheadermap = [('Date', 'date'),
185 ('Branch', 'branch'),
185 ('Branch', 'branch'),
186 ('Node ID', 'nodeid'),
186 ('Node ID', 'nodeid'),
187 ]
187 ]
188
188
189 def extract(ui, fileobj):
189 def extract(ui, fileobj):
190 '''extract patch from data read from fileobj.
190 '''extract patch from data read from fileobj.
191
191
192 patch can be a normal patch or contained in an email message.
192 patch can be a normal patch or contained in an email message.
193
193
194 return a dictionary. Standard keys are:
194 return a dictionary. Standard keys are:
195 - filename,
195 - filename,
196 - message,
196 - message,
197 - user,
197 - user,
198 - date,
198 - date,
199 - branch,
199 - branch,
200 - node,
200 - node,
201 - p1,
201 - p1,
202 - p2.
202 - p2.
203 Any item can be missing from the dictionary. If filename is missing,
203 Any item can be missing from the dictionary. If filename is missing,
204 fileobj did not contain a patch. Caller must unlink filename when done.'''
204 fileobj did not contain a patch. Caller must unlink filename when done.'''
205
205
206 # attempt to detect the start of a patch
206 # attempt to detect the start of a patch
207 # (this heuristic is borrowed from quilt)
207 # (this heuristic is borrowed from quilt)
208 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
208 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
209 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
209 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
210 r'---[ \t].*?^\+\+\+[ \t]|'
210 r'---[ \t].*?^\+\+\+[ \t]|'
211 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
211 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
212
212
213 data = {}
213 data = {}
214 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
214 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
215 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
215 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
216 try:
216 try:
217 msg = email.Parser.Parser().parse(fileobj)
217 msg = email.Parser.Parser().parse(fileobj)
218
218
219 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
219 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
220 data['user'] = msg['From'] and mail.headdecode(msg['From'])
220 data['user'] = msg['From'] and mail.headdecode(msg['From'])
221 if not subject and not data['user']:
221 if not subject and not data['user']:
222 # Not an email, restore parsed headers if any
222 # Not an email, restore parsed headers if any
223 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
223 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
224
224
225 # should try to parse msg['Date']
225 # should try to parse msg['Date']
226 parents = []
226 parents = []
227
227
228 if subject:
228 if subject:
229 if subject.startswith('[PATCH'):
229 if subject.startswith('[PATCH'):
230 pend = subject.find(']')
230 pend = subject.find(']')
231 if pend >= 0:
231 if pend >= 0:
232 subject = subject[pend + 1:].lstrip()
232 subject = subject[pend + 1:].lstrip()
233 subject = re.sub(r'\n[ \t]+', ' ', subject)
233 subject = re.sub(r'\n[ \t]+', ' ', subject)
234 ui.debug('Subject: %s\n' % subject)
234 ui.debug('Subject: %s\n' % subject)
235 if data['user']:
235 if data['user']:
236 ui.debug('From: %s\n' % data['user'])
236 ui.debug('From: %s\n' % data['user'])
237 diffs_seen = 0
237 diffs_seen = 0
238 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
238 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
239 message = ''
239 message = ''
240 for part in msg.walk():
240 for part in msg.walk():
241 content_type = part.get_content_type()
241 content_type = part.get_content_type()
242 ui.debug('Content-Type: %s\n' % content_type)
242 ui.debug('Content-Type: %s\n' % content_type)
243 if content_type not in ok_types:
243 if content_type not in ok_types:
244 continue
244 continue
245 payload = part.get_payload(decode=True)
245 payload = part.get_payload(decode=True)
246 m = diffre.search(payload)
246 m = diffre.search(payload)
247 if m:
247 if m:
248 hgpatch = False
248 hgpatch = False
249 hgpatchheader = False
249 hgpatchheader = False
250 ignoretext = False
250 ignoretext = False
251
251
252 ui.debug('found patch at byte %d\n' % m.start(0))
252 ui.debug('found patch at byte %d\n' % m.start(0))
253 diffs_seen += 1
253 diffs_seen += 1
254 cfp = stringio()
254 cfp = stringio()
255 for line in payload[:m.start(0)].splitlines():
255 for line in payload[:m.start(0)].splitlines():
256 if line.startswith('# HG changeset patch') and not hgpatch:
256 if line.startswith('# HG changeset patch') and not hgpatch:
257 ui.debug('patch generated by hg export\n')
257 ui.debug('patch generated by hg export\n')
258 hgpatch = True
258 hgpatch = True
259 hgpatchheader = True
259 hgpatchheader = True
260 # drop earlier commit message content
260 # drop earlier commit message content
261 cfp.seek(0)
261 cfp.seek(0)
262 cfp.truncate()
262 cfp.truncate()
263 subject = None
263 subject = None
264 elif hgpatchheader:
264 elif hgpatchheader:
265 if line.startswith('# User '):
265 if line.startswith('# User '):
266 data['user'] = line[7:]
266 data['user'] = line[7:]
267 ui.debug('From: %s\n' % data['user'])
267 ui.debug('From: %s\n' % data['user'])
268 elif line.startswith("# Parent "):
268 elif line.startswith("# Parent "):
269 parents.append(line[9:].lstrip())
269 parents.append(line[9:].lstrip())
270 elif line.startswith("# "):
270 elif line.startswith("# "):
271 for header, key in patchheadermap:
271 for header, key in patchheadermap:
272 prefix = '# %s ' % header
272 prefix = '# %s ' % header
273 if line.startswith(prefix):
273 if line.startswith(prefix):
274 data[key] = line[len(prefix):]
274 data[key] = line[len(prefix):]
275 else:
275 else:
276 hgpatchheader = False
276 hgpatchheader = False
277 elif line == '---':
277 elif line == '---':
278 ignoretext = True
278 ignoretext = True
279 if not hgpatchheader and not ignoretext:
279 if not hgpatchheader and not ignoretext:
280 cfp.write(line)
280 cfp.write(line)
281 cfp.write('\n')
281 cfp.write('\n')
282 message = cfp.getvalue()
282 message = cfp.getvalue()
283 if tmpfp:
283 if tmpfp:
284 tmpfp.write(payload)
284 tmpfp.write(payload)
285 if not payload.endswith('\n'):
285 if not payload.endswith('\n'):
286 tmpfp.write('\n')
286 tmpfp.write('\n')
287 elif not diffs_seen and message and content_type == 'text/plain':
287 elif not diffs_seen and message and content_type == 'text/plain':
288 message += '\n' + payload
288 message += '\n' + payload
289 except: # re-raises
289 except: # re-raises
290 tmpfp.close()
290 tmpfp.close()
291 os.unlink(tmpname)
291 os.unlink(tmpname)
292 raise
292 raise
293
293
294 if subject and not message.startswith(subject):
294 if subject and not message.startswith(subject):
295 message = '%s\n%s' % (subject, message)
295 message = '%s\n%s' % (subject, message)
296 data['message'] = message
296 data['message'] = message
297 tmpfp.close()
297 tmpfp.close()
298 if parents:
298 if parents:
299 data['p1'] = parents.pop(0)
299 data['p1'] = parents.pop(0)
300 if parents:
300 if parents:
301 data['p2'] = parents.pop(0)
301 data['p2'] = parents.pop(0)
302
302
303 if diffs_seen:
303 if diffs_seen:
304 data['filename'] = tmpname
304 data['filename'] = tmpname
305 else:
305 else:
306 os.unlink(tmpname)
306 os.unlink(tmpname)
307 return data
307 return data
308
308
309 class patchmeta(object):
309 class patchmeta(object):
310 """Patched file metadata
310 """Patched file metadata
311
311
312 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
312 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
313 or COPY. 'path' is patched file path. 'oldpath' is set to the
313 or COPY. 'path' is patched file path. 'oldpath' is set to the
314 origin file when 'op' is either COPY or RENAME, None otherwise. If
314 origin file when 'op' is either COPY or RENAME, None otherwise. If
315 file mode is changed, 'mode' is a tuple (islink, isexec) where
315 file mode is changed, 'mode' is a tuple (islink, isexec) where
316 'islink' is True if the file is a symlink and 'isexec' is True if
316 'islink' is True if the file is a symlink and 'isexec' is True if
317 the file is executable. Otherwise, 'mode' is None.
317 the file is executable. Otherwise, 'mode' is None.
318 """
318 """
319 def __init__(self, path):
319 def __init__(self, path):
320 self.path = path
320 self.path = path
321 self.oldpath = None
321 self.oldpath = None
322 self.mode = None
322 self.mode = None
323 self.op = 'MODIFY'
323 self.op = 'MODIFY'
324 self.binary = False
324 self.binary = False
325
325
326 def setmode(self, mode):
326 def setmode(self, mode):
327 islink = mode & 0o20000
327 islink = mode & 0o20000
328 isexec = mode & 0o100
328 isexec = mode & 0o100
329 self.mode = (islink, isexec)
329 self.mode = (islink, isexec)
330
330
331 def copy(self):
331 def copy(self):
332 other = patchmeta(self.path)
332 other = patchmeta(self.path)
333 other.oldpath = self.oldpath
333 other.oldpath = self.oldpath
334 other.mode = self.mode
334 other.mode = self.mode
335 other.op = self.op
335 other.op = self.op
336 other.binary = self.binary
336 other.binary = self.binary
337 return other
337 return other
338
338
339 def _ispatchinga(self, afile):
339 def _ispatchinga(self, afile):
340 if afile == '/dev/null':
340 if afile == '/dev/null':
341 return self.op == 'ADD'
341 return self.op == 'ADD'
342 return afile == 'a/' + (self.oldpath or self.path)
342 return afile == 'a/' + (self.oldpath or self.path)
343
343
344 def _ispatchingb(self, bfile):
344 def _ispatchingb(self, bfile):
345 if bfile == '/dev/null':
345 if bfile == '/dev/null':
346 return self.op == 'DELETE'
346 return self.op == 'DELETE'
347 return bfile == 'b/' + self.path
347 return bfile == 'b/' + self.path
348
348
349 def ispatching(self, afile, bfile):
349 def ispatching(self, afile, bfile):
350 return self._ispatchinga(afile) and self._ispatchingb(bfile)
350 return self._ispatchinga(afile) and self._ispatchingb(bfile)
351
351
352 def __repr__(self):
352 def __repr__(self):
353 return "<patchmeta %s %r>" % (self.op, self.path)
353 return "<patchmeta %s %r>" % (self.op, self.path)
354
354
355 def readgitpatch(lr):
355 def readgitpatch(lr):
356 """extract git-style metadata about patches from <patchname>"""
356 """extract git-style metadata about patches from <patchname>"""
357
357
358 # Filter patch for git information
358 # Filter patch for git information
359 gp = None
359 gp = None
360 gitpatches = []
360 gitpatches = []
361 for line in lr:
361 for line in lr:
362 line = line.rstrip(' \r\n')
362 line = line.rstrip(' \r\n')
363 if line.startswith('diff --git a/'):
363 if line.startswith('diff --git a/'):
364 m = gitre.match(line)
364 m = gitre.match(line)
365 if m:
365 if m:
366 if gp:
366 if gp:
367 gitpatches.append(gp)
367 gitpatches.append(gp)
368 dst = m.group(2)
368 dst = m.group(2)
369 gp = patchmeta(dst)
369 gp = patchmeta(dst)
370 elif gp:
370 elif gp:
371 if line.startswith('--- '):
371 if line.startswith('--- '):
372 gitpatches.append(gp)
372 gitpatches.append(gp)
373 gp = None
373 gp = None
374 continue
374 continue
375 if line.startswith('rename from '):
375 if line.startswith('rename from '):
376 gp.op = 'RENAME'
376 gp.op = 'RENAME'
377 gp.oldpath = line[12:]
377 gp.oldpath = line[12:]
378 elif line.startswith('rename to '):
378 elif line.startswith('rename to '):
379 gp.path = line[10:]
379 gp.path = line[10:]
380 elif line.startswith('copy from '):
380 elif line.startswith('copy from '):
381 gp.op = 'COPY'
381 gp.op = 'COPY'
382 gp.oldpath = line[10:]
382 gp.oldpath = line[10:]
383 elif line.startswith('copy to '):
383 elif line.startswith('copy to '):
384 gp.path = line[8:]
384 gp.path = line[8:]
385 elif line.startswith('deleted file'):
385 elif line.startswith('deleted file'):
386 gp.op = 'DELETE'
386 gp.op = 'DELETE'
387 elif line.startswith('new file mode '):
387 elif line.startswith('new file mode '):
388 gp.op = 'ADD'
388 gp.op = 'ADD'
389 gp.setmode(int(line[-6:], 8))
389 gp.setmode(int(line[-6:], 8))
390 elif line.startswith('new mode '):
390 elif line.startswith('new mode '):
391 gp.setmode(int(line[-6:], 8))
391 gp.setmode(int(line[-6:], 8))
392 elif line.startswith('GIT binary patch'):
392 elif line.startswith('GIT binary patch'):
393 gp.binary = True
393 gp.binary = True
394 if gp:
394 if gp:
395 gitpatches.append(gp)
395 gitpatches.append(gp)
396
396
397 return gitpatches
397 return gitpatches
398
398
399 class linereader(object):
399 class linereader(object):
400 # simple class to allow pushing lines back into the input stream
400 # simple class to allow pushing lines back into the input stream
401 def __init__(self, fp):
401 def __init__(self, fp):
402 self.fp = fp
402 self.fp = fp
403 self.buf = []
403 self.buf = []
404
404
405 def push(self, line):
405 def push(self, line):
406 if line is not None:
406 if line is not None:
407 self.buf.append(line)
407 self.buf.append(line)
408
408
409 def readline(self):
409 def readline(self):
410 if self.buf:
410 if self.buf:
411 l = self.buf[0]
411 l = self.buf[0]
412 del self.buf[0]
412 del self.buf[0]
413 return l
413 return l
414 return self.fp.readline()
414 return self.fp.readline()
415
415
416 def __iter__(self):
416 def __iter__(self):
417 return iter(self.readline, '')
417 return iter(self.readline, '')
418
418
419 class abstractbackend(object):
419 class abstractbackend(object):
420 def __init__(self, ui):
420 def __init__(self, ui):
421 self.ui = ui
421 self.ui = ui
422
422
423 def getfile(self, fname):
423 def getfile(self, fname):
424 """Return target file data and flags as a (data, (islink,
424 """Return target file data and flags as a (data, (islink,
425 isexec)) tuple. Data is None if file is missing/deleted.
425 isexec)) tuple. Data is None if file is missing/deleted.
426 """
426 """
427 raise NotImplementedError
427 raise NotImplementedError
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 """Write data to target file fname and set its mode. mode is a
430 """Write data to target file fname and set its mode. mode is a
431 (islink, isexec) tuple. If data is None, the file content should
431 (islink, isexec) tuple. If data is None, the file content should
432 be left unchanged. If the file is modified after being copied,
432 be left unchanged. If the file is modified after being copied,
433 copysource is set to the original file name.
433 copysource is set to the original file name.
434 """
434 """
435 raise NotImplementedError
435 raise NotImplementedError
436
436
437 def unlink(self, fname):
437 def unlink(self, fname):
438 """Unlink target file."""
438 """Unlink target file."""
439 raise NotImplementedError
439 raise NotImplementedError
440
440
441 def writerej(self, fname, failed, total, lines):
441 def writerej(self, fname, failed, total, lines):
442 """Write rejected lines for fname. total is the number of hunks
442 """Write rejected lines for fname. total is the number of hunks
443 which failed to apply and total the total number of hunks for this
443 which failed to apply and total the total number of hunks for this
444 files.
444 files.
445 """
445 """
446 pass
446 pass
447
447
448 def exists(self, fname):
448 def exists(self, fname):
449 raise NotImplementedError
449 raise NotImplementedError
450
450
451 def close(self):
451 def close(self):
452 raise NotImplementedError
452 raise NotImplementedError
453
453
454 class fsbackend(abstractbackend):
454 class fsbackend(abstractbackend):
455 def __init__(self, ui, basedir):
455 def __init__(self, ui, basedir):
456 super(fsbackend, self).__init__(ui)
456 super(fsbackend, self).__init__(ui)
457 self.opener = vfsmod.vfs(basedir)
457 self.opener = vfsmod.vfs(basedir)
458
458
459 def _join(self, f):
460 return os.path.join(self.opener.base, f)
461
462 def getfile(self, fname):
459 def getfile(self, fname):
463 if self.opener.islink(fname):
460 if self.opener.islink(fname):
464 return (self.opener.readlink(fname), (True, False))
461 return (self.opener.readlink(fname), (True, False))
465
462
466 isexec = False
463 isexec = False
467 try:
464 try:
468 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
465 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
469 except OSError as e:
466 except OSError as e:
470 if e.errno != errno.ENOENT:
467 if e.errno != errno.ENOENT:
471 raise
468 raise
472 try:
469 try:
473 return (self.opener.read(fname), (False, isexec))
470 return (self.opener.read(fname), (False, isexec))
474 except IOError as e:
471 except IOError as e:
475 if e.errno != errno.ENOENT:
472 if e.errno != errno.ENOENT:
476 raise
473 raise
477 return None, None
474 return None, None
478
475
479 def setfile(self, fname, data, mode, copysource):
476 def setfile(self, fname, data, mode, copysource):
480 islink, isexec = mode
477 islink, isexec = mode
481 if data is None:
478 if data is None:
482 self.opener.setflags(fname, islink, isexec)
479 self.opener.setflags(fname, islink, isexec)
483 return
480 return
484 if islink:
481 if islink:
485 self.opener.symlink(data, fname)
482 self.opener.symlink(data, fname)
486 else:
483 else:
487 self.opener.write(fname, data)
484 self.opener.write(fname, data)
488 if isexec:
485 if isexec:
489 self.opener.setflags(fname, False, True)
486 self.opener.setflags(fname, False, True)
490
487
491 def unlink(self, fname):
488 def unlink(self, fname):
492 self.opener.unlinkpath(fname, ignoremissing=True)
489 self.opener.unlinkpath(fname, ignoremissing=True)
493
490
494 def writerej(self, fname, failed, total, lines):
491 def writerej(self, fname, failed, total, lines):
495 fname = fname + ".rej"
492 fname = fname + ".rej"
496 self.ui.warn(
493 self.ui.warn(
497 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
494 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
498 (failed, total, fname))
495 (failed, total, fname))
499 fp = self.opener(fname, 'w')
496 fp = self.opener(fname, 'w')
500 fp.writelines(lines)
497 fp.writelines(lines)
501 fp.close()
498 fp.close()
502
499
503 def exists(self, fname):
500 def exists(self, fname):
504 return self.opener.lexists(fname)
501 return self.opener.lexists(fname)
505
502
506 class workingbackend(fsbackend):
503 class workingbackend(fsbackend):
507 def __init__(self, ui, repo, similarity):
504 def __init__(self, ui, repo, similarity):
508 super(workingbackend, self).__init__(ui, repo.root)
505 super(workingbackend, self).__init__(ui, repo.root)
509 self.repo = repo
506 self.repo = repo
510 self.similarity = similarity
507 self.similarity = similarity
511 self.removed = set()
508 self.removed = set()
512 self.changed = set()
509 self.changed = set()
513 self.copied = []
510 self.copied = []
514
511
515 def _checkknown(self, fname):
512 def _checkknown(self, fname):
516 if self.repo.dirstate[fname] == '?' and self.exists(fname):
513 if self.repo.dirstate[fname] == '?' and self.exists(fname):
517 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
514 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
518
515
519 def setfile(self, fname, data, mode, copysource):
516 def setfile(self, fname, data, mode, copysource):
520 self._checkknown(fname)
517 self._checkknown(fname)
521 super(workingbackend, self).setfile(fname, data, mode, copysource)
518 super(workingbackend, self).setfile(fname, data, mode, copysource)
522 if copysource is not None:
519 if copysource is not None:
523 self.copied.append((copysource, fname))
520 self.copied.append((copysource, fname))
524 self.changed.add(fname)
521 self.changed.add(fname)
525
522
526 def unlink(self, fname):
523 def unlink(self, fname):
527 self._checkknown(fname)
524 self._checkknown(fname)
528 super(workingbackend, self).unlink(fname)
525 super(workingbackend, self).unlink(fname)
529 self.removed.add(fname)
526 self.removed.add(fname)
530 self.changed.add(fname)
527 self.changed.add(fname)
531
528
532 def close(self):
529 def close(self):
533 wctx = self.repo[None]
530 wctx = self.repo[None]
534 changed = set(self.changed)
531 changed = set(self.changed)
535 for src, dst in self.copied:
532 for src, dst in self.copied:
536 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
533 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
537 if self.removed:
534 if self.removed:
538 wctx.forget(sorted(self.removed))
535 wctx.forget(sorted(self.removed))
539 for f in self.removed:
536 for f in self.removed:
540 if f not in self.repo.dirstate:
537 if f not in self.repo.dirstate:
541 # File was deleted and no longer belongs to the
538 # File was deleted and no longer belongs to the
542 # dirstate, it was probably marked added then
539 # dirstate, it was probably marked added then
543 # deleted, and should not be considered by
540 # deleted, and should not be considered by
544 # marktouched().
541 # marktouched().
545 changed.discard(f)
542 changed.discard(f)
546 if changed:
543 if changed:
547 scmutil.marktouched(self.repo, changed, self.similarity)
544 scmutil.marktouched(self.repo, changed, self.similarity)
548 return sorted(self.changed)
545 return sorted(self.changed)
549
546
550 class filestore(object):
547 class filestore(object):
551 def __init__(self, maxsize=None):
548 def __init__(self, maxsize=None):
552 self.opener = None
549 self.opener = None
553 self.files = {}
550 self.files = {}
554 self.created = 0
551 self.created = 0
555 self.maxsize = maxsize
552 self.maxsize = maxsize
556 if self.maxsize is None:
553 if self.maxsize is None:
557 self.maxsize = 4*(2**20)
554 self.maxsize = 4*(2**20)
558 self.size = 0
555 self.size = 0
559 self.data = {}
556 self.data = {}
560
557
561 def setfile(self, fname, data, mode, copied=None):
558 def setfile(self, fname, data, mode, copied=None):
562 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
559 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
563 self.data[fname] = (data, mode, copied)
560 self.data[fname] = (data, mode, copied)
564 self.size += len(data)
561 self.size += len(data)
565 else:
562 else:
566 if self.opener is None:
563 if self.opener is None:
567 root = tempfile.mkdtemp(prefix='hg-patch-')
564 root = tempfile.mkdtemp(prefix='hg-patch-')
568 self.opener = vfsmod.vfs(root)
565 self.opener = vfsmod.vfs(root)
569 # Avoid filename issues with these simple names
566 # Avoid filename issues with these simple names
570 fn = str(self.created)
567 fn = str(self.created)
571 self.opener.write(fn, data)
568 self.opener.write(fn, data)
572 self.created += 1
569 self.created += 1
573 self.files[fname] = (fn, mode, copied)
570 self.files[fname] = (fn, mode, copied)
574
571
575 def getfile(self, fname):
572 def getfile(self, fname):
576 if fname in self.data:
573 if fname in self.data:
577 return self.data[fname]
574 return self.data[fname]
578 if not self.opener or fname not in self.files:
575 if not self.opener or fname not in self.files:
579 return None, None, None
576 return None, None, None
580 fn, mode, copied = self.files[fname]
577 fn, mode, copied = self.files[fname]
581 return self.opener.read(fn), mode, copied
578 return self.opener.read(fn), mode, copied
582
579
583 def close(self):
580 def close(self):
584 if self.opener:
581 if self.opener:
585 shutil.rmtree(self.opener.base)
582 shutil.rmtree(self.opener.base)
586
583
587 class repobackend(abstractbackend):
584 class repobackend(abstractbackend):
588 def __init__(self, ui, repo, ctx, store):
585 def __init__(self, ui, repo, ctx, store):
589 super(repobackend, self).__init__(ui)
586 super(repobackend, self).__init__(ui)
590 self.repo = repo
587 self.repo = repo
591 self.ctx = ctx
588 self.ctx = ctx
592 self.store = store
589 self.store = store
593 self.changed = set()
590 self.changed = set()
594 self.removed = set()
591 self.removed = set()
595 self.copied = {}
592 self.copied = {}
596
593
597 def _checkknown(self, fname):
594 def _checkknown(self, fname):
598 if fname not in self.ctx:
595 if fname not in self.ctx:
599 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
596 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
600
597
601 def getfile(self, fname):
598 def getfile(self, fname):
602 try:
599 try:
603 fctx = self.ctx[fname]
600 fctx = self.ctx[fname]
604 except error.LookupError:
601 except error.LookupError:
605 return None, None
602 return None, None
606 flags = fctx.flags()
603 flags = fctx.flags()
607 return fctx.data(), ('l' in flags, 'x' in flags)
604 return fctx.data(), ('l' in flags, 'x' in flags)
608
605
609 def setfile(self, fname, data, mode, copysource):
606 def setfile(self, fname, data, mode, copysource):
610 if copysource:
607 if copysource:
611 self._checkknown(copysource)
608 self._checkknown(copysource)
612 if data is None:
609 if data is None:
613 data = self.ctx[fname].data()
610 data = self.ctx[fname].data()
614 self.store.setfile(fname, data, mode, copysource)
611 self.store.setfile(fname, data, mode, copysource)
615 self.changed.add(fname)
612 self.changed.add(fname)
616 if copysource:
613 if copysource:
617 self.copied[fname] = copysource
614 self.copied[fname] = copysource
618
615
619 def unlink(self, fname):
616 def unlink(self, fname):
620 self._checkknown(fname)
617 self._checkknown(fname)
621 self.removed.add(fname)
618 self.removed.add(fname)
622
619
623 def exists(self, fname):
620 def exists(self, fname):
624 return fname in self.ctx
621 return fname in self.ctx
625
622
626 def close(self):
623 def close(self):
627 return self.changed | self.removed
624 return self.changed | self.removed
628
625
629 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
630 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
631 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
632 eolmodes = ['strict', 'crlf', 'lf', 'auto']
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
633
630
634 class patchfile(object):
631 class patchfile(object):
635 def __init__(self, ui, gp, backend, store, eolmode='strict'):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
636 self.fname = gp.path
633 self.fname = gp.path
637 self.eolmode = eolmode
634 self.eolmode = eolmode
638 self.eol = None
635 self.eol = None
639 self.backend = backend
636 self.backend = backend
640 self.ui = ui
637 self.ui = ui
641 self.lines = []
638 self.lines = []
642 self.exists = False
639 self.exists = False
643 self.missing = True
640 self.missing = True
644 self.mode = gp.mode
641 self.mode = gp.mode
645 self.copysource = gp.oldpath
642 self.copysource = gp.oldpath
646 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
647 self.remove = gp.op == 'DELETE'
644 self.remove = gp.op == 'DELETE'
648 if self.copysource is None:
645 if self.copysource is None:
649 data, mode = backend.getfile(self.fname)
646 data, mode = backend.getfile(self.fname)
650 else:
647 else:
651 data, mode = store.getfile(self.copysource)[:2]
648 data, mode = store.getfile(self.copysource)[:2]
652 if data is not None:
649 if data is not None:
653 self.exists = self.copysource is None or backend.exists(self.fname)
650 self.exists = self.copysource is None or backend.exists(self.fname)
654 self.missing = False
651 self.missing = False
655 if data:
652 if data:
656 self.lines = mdiff.splitnewlines(data)
653 self.lines = mdiff.splitnewlines(data)
657 if self.mode is None:
654 if self.mode is None:
658 self.mode = mode
655 self.mode = mode
659 if self.lines:
656 if self.lines:
660 # Normalize line endings
657 # Normalize line endings
661 if self.lines[0].endswith('\r\n'):
658 if self.lines[0].endswith('\r\n'):
662 self.eol = '\r\n'
659 self.eol = '\r\n'
663 elif self.lines[0].endswith('\n'):
660 elif self.lines[0].endswith('\n'):
664 self.eol = '\n'
661 self.eol = '\n'
665 if eolmode != 'strict':
662 if eolmode != 'strict':
666 nlines = []
663 nlines = []
667 for l in self.lines:
664 for l in self.lines:
668 if l.endswith('\r\n'):
665 if l.endswith('\r\n'):
669 l = l[:-2] + '\n'
666 l = l[:-2] + '\n'
670 nlines.append(l)
667 nlines.append(l)
671 self.lines = nlines
668 self.lines = nlines
672 else:
669 else:
673 if self.create:
670 if self.create:
674 self.missing = False
671 self.missing = False
675 if self.mode is None:
672 if self.mode is None:
676 self.mode = (False, False)
673 self.mode = (False, False)
677 if self.missing:
674 if self.missing:
678 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
679 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
680 "current directory)\n"))
677 "current directory)\n"))
681
678
682 self.hash = {}
679 self.hash = {}
683 self.dirty = 0
680 self.dirty = 0
684 self.offset = 0
681 self.offset = 0
685 self.skew = 0
682 self.skew = 0
686 self.rej = []
683 self.rej = []
687 self.fileprinted = False
684 self.fileprinted = False
688 self.printfile(False)
685 self.printfile(False)
689 self.hunks = 0
686 self.hunks = 0
690
687
691 def writelines(self, fname, lines, mode):
688 def writelines(self, fname, lines, mode):
692 if self.eolmode == 'auto':
689 if self.eolmode == 'auto':
693 eol = self.eol
690 eol = self.eol
694 elif self.eolmode == 'crlf':
691 elif self.eolmode == 'crlf':
695 eol = '\r\n'
692 eol = '\r\n'
696 else:
693 else:
697 eol = '\n'
694 eol = '\n'
698
695
699 if self.eolmode != 'strict' and eol and eol != '\n':
696 if self.eolmode != 'strict' and eol and eol != '\n':
700 rawlines = []
697 rawlines = []
701 for l in lines:
698 for l in lines:
702 if l and l[-1] == '\n':
699 if l and l[-1] == '\n':
703 l = l[:-1] + eol
700 l = l[:-1] + eol
704 rawlines.append(l)
701 rawlines.append(l)
705 lines = rawlines
702 lines = rawlines
706
703
707 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
708
705
709 def printfile(self, warn):
706 def printfile(self, warn):
710 if self.fileprinted:
707 if self.fileprinted:
711 return
708 return
712 if warn or self.ui.verbose:
709 if warn or self.ui.verbose:
713 self.fileprinted = True
710 self.fileprinted = True
714 s = _("patching file %s\n") % self.fname
711 s = _("patching file %s\n") % self.fname
715 if warn:
712 if warn:
716 self.ui.warn(s)
713 self.ui.warn(s)
717 else:
714 else:
718 self.ui.note(s)
715 self.ui.note(s)
719
716
720
717
721 def findlines(self, l, linenum):
718 def findlines(self, l, linenum):
722 # looks through the hash and finds candidate lines. The
719 # looks through the hash and finds candidate lines. The
723 # result is a list of line numbers sorted based on distance
720 # result is a list of line numbers sorted based on distance
724 # from linenum
721 # from linenum
725
722
726 cand = self.hash.get(l, [])
723 cand = self.hash.get(l, [])
727 if len(cand) > 1:
724 if len(cand) > 1:
728 # resort our list of potentials forward then back.
725 # resort our list of potentials forward then back.
729 cand.sort(key=lambda x: abs(x - linenum))
726 cand.sort(key=lambda x: abs(x - linenum))
730 return cand
727 return cand
731
728
732 def write_rej(self):
729 def write_rej(self):
733 # our rejects are a little different from patch(1). This always
730 # our rejects are a little different from patch(1). This always
734 # creates rejects in the same form as the original patch. A file
731 # creates rejects in the same form as the original patch. A file
735 # header is inserted so that you can run the reject through patch again
732 # header is inserted so that you can run the reject through patch again
736 # without having to type the filename.
733 # without having to type the filename.
737 if not self.rej:
734 if not self.rej:
738 return
735 return
739 base = os.path.basename(self.fname)
736 base = os.path.basename(self.fname)
740 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
741 for x in self.rej:
738 for x in self.rej:
742 for l in x.hunk:
739 for l in x.hunk:
743 lines.append(l)
740 lines.append(l)
744 if l[-1:] != '\n':
741 if l[-1:] != '\n':
745 lines.append("\n\ No newline at end of file\n")
742 lines.append("\n\ No newline at end of file\n")
746 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
747
744
748 def apply(self, h):
745 def apply(self, h):
749 if not h.complete():
746 if not h.complete():
750 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
751 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
752 h.lenb))
749 h.lenb))
753
750
754 self.hunks += 1
751 self.hunks += 1
755
752
756 if self.missing:
753 if self.missing:
757 self.rej.append(h)
754 self.rej.append(h)
758 return -1
755 return -1
759
756
760 if self.exists and self.create:
757 if self.exists and self.create:
761 if self.copysource:
758 if self.copysource:
762 self.ui.warn(_("cannot create %s: destination already "
759 self.ui.warn(_("cannot create %s: destination already "
763 "exists\n") % self.fname)
760 "exists\n") % self.fname)
764 else:
761 else:
765 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.ui.warn(_("file %s already exists\n") % self.fname)
766 self.rej.append(h)
763 self.rej.append(h)
767 return -1
764 return -1
768
765
769 if isinstance(h, binhunk):
766 if isinstance(h, binhunk):
770 if self.remove:
767 if self.remove:
771 self.backend.unlink(self.fname)
768 self.backend.unlink(self.fname)
772 else:
769 else:
773 l = h.new(self.lines)
770 l = h.new(self.lines)
774 self.lines[:] = l
771 self.lines[:] = l
775 self.offset += len(l)
772 self.offset += len(l)
776 self.dirty = True
773 self.dirty = True
777 return 0
774 return 0
778
775
779 horig = h
776 horig = h
780 if (self.eolmode in ('crlf', 'lf')
777 if (self.eolmode in ('crlf', 'lf')
781 or self.eolmode == 'auto' and self.eol):
778 or self.eolmode == 'auto' and self.eol):
782 # If new eols are going to be normalized, then normalize
779 # If new eols are going to be normalized, then normalize
783 # hunk data before patching. Otherwise, preserve input
780 # hunk data before patching. Otherwise, preserve input
784 # line-endings.
781 # line-endings.
785 h = h.getnormalized()
782 h = h.getnormalized()
786
783
787 # fast case first, no offsets, no fuzz
784 # fast case first, no offsets, no fuzz
788 old, oldstart, new, newstart = h.fuzzit(0, False)
785 old, oldstart, new, newstart = h.fuzzit(0, False)
789 oldstart += self.offset
786 oldstart += self.offset
790 orig_start = oldstart
787 orig_start = oldstart
791 # if there's skew we want to emit the "(offset %d lines)" even
788 # if there's skew we want to emit the "(offset %d lines)" even
792 # when the hunk cleanly applies at start + skew, so skip the
789 # when the hunk cleanly applies at start + skew, so skip the
793 # fast case code
790 # fast case code
794 if (self.skew == 0 and
791 if (self.skew == 0 and
795 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
796 if self.remove:
793 if self.remove:
797 self.backend.unlink(self.fname)
794 self.backend.unlink(self.fname)
798 else:
795 else:
799 self.lines[oldstart:oldstart + len(old)] = new
796 self.lines[oldstart:oldstart + len(old)] = new
800 self.offset += len(new) - len(old)
797 self.offset += len(new) - len(old)
801 self.dirty = True
798 self.dirty = True
802 return 0
799 return 0
803
800
804 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
805 self.hash = {}
802 self.hash = {}
806 for x, s in enumerate(self.lines):
803 for x, s in enumerate(self.lines):
807 self.hash.setdefault(s, []).append(x)
804 self.hash.setdefault(s, []).append(x)
808
805
809 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
810 for toponly in [True, False]:
807 for toponly in [True, False]:
811 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
812 oldstart = oldstart + self.offset + self.skew
809 oldstart = oldstart + self.offset + self.skew
813 oldstart = min(oldstart, len(self.lines))
810 oldstart = min(oldstart, len(self.lines))
814 if old:
811 if old:
815 cand = self.findlines(old[0][1:], oldstart)
812 cand = self.findlines(old[0][1:], oldstart)
816 else:
813 else:
817 # Only adding lines with no or fuzzed context, just
814 # Only adding lines with no or fuzzed context, just
818 # take the skew in account
815 # take the skew in account
819 cand = [oldstart]
816 cand = [oldstart]
820
817
821 for l in cand:
818 for l in cand:
822 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
823 self.lines[l : l + len(old)] = new
820 self.lines[l : l + len(old)] = new
824 self.offset += len(new) - len(old)
821 self.offset += len(new) - len(old)
825 self.skew = l - orig_start
822 self.skew = l - orig_start
826 self.dirty = True
823 self.dirty = True
827 offset = l - orig_start - fuzzlen
824 offset = l - orig_start - fuzzlen
828 if fuzzlen:
825 if fuzzlen:
829 msg = _("Hunk #%d succeeded at %d "
826 msg = _("Hunk #%d succeeded at %d "
830 "with fuzz %d "
827 "with fuzz %d "
831 "(offset %d lines).\n")
828 "(offset %d lines).\n")
832 self.printfile(True)
829 self.printfile(True)
833 self.ui.warn(msg %
830 self.ui.warn(msg %
834 (h.number, l + 1, fuzzlen, offset))
831 (h.number, l + 1, fuzzlen, offset))
835 else:
832 else:
836 msg = _("Hunk #%d succeeded at %d "
833 msg = _("Hunk #%d succeeded at %d "
837 "(offset %d lines).\n")
834 "(offset %d lines).\n")
838 self.ui.note(msg % (h.number, l + 1, offset))
835 self.ui.note(msg % (h.number, l + 1, offset))
839 return fuzzlen
836 return fuzzlen
840 self.printfile(True)
837 self.printfile(True)
841 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
842 self.rej.append(horig)
839 self.rej.append(horig)
843 return -1
840 return -1
844
841
845 def close(self):
842 def close(self):
846 if self.dirty:
843 if self.dirty:
847 self.writelines(self.fname, self.lines, self.mode)
844 self.writelines(self.fname, self.lines, self.mode)
848 self.write_rej()
845 self.write_rej()
849 return len(self.rej)
846 return len(self.rej)
850
847
851 class header(object):
848 class header(object):
852 """patch header
849 """patch header
853 """
850 """
854 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
855 diff_re = re.compile('diff -r .* (.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
856 allhunks_re = re.compile('(?:index|deleted file) ')
853 allhunks_re = re.compile('(?:index|deleted file) ')
857 pretty_re = re.compile('(?:new file|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
858 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
859 newfile_re = re.compile('(?:new file)')
856 newfile_re = re.compile('(?:new file)')
860
857
861 def __init__(self, header):
858 def __init__(self, header):
862 self.header = header
859 self.header = header
863 self.hunks = []
860 self.hunks = []
864
861
865 def binary(self):
862 def binary(self):
866 return any(h.startswith('index ') for h in self.header)
863 return any(h.startswith('index ') for h in self.header)
867
864
868 def pretty(self, fp):
865 def pretty(self, fp):
869 for h in self.header:
866 for h in self.header:
870 if h.startswith('index '):
867 if h.startswith('index '):
871 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
872 break
869 break
873 if self.pretty_re.match(h):
870 if self.pretty_re.match(h):
874 fp.write(h)
871 fp.write(h)
875 if self.binary():
872 if self.binary():
876 fp.write(_('this is a binary file\n'))
873 fp.write(_('this is a binary file\n'))
877 break
874 break
878 if h.startswith('---'):
875 if h.startswith('---'):
879 fp.write(_('%d hunks, %d lines changed\n') %
876 fp.write(_('%d hunks, %d lines changed\n') %
880 (len(self.hunks),
877 (len(self.hunks),
881 sum([max(h.added, h.removed) for h in self.hunks])))
878 sum([max(h.added, h.removed) for h in self.hunks])))
882 break
879 break
883 fp.write(h)
880 fp.write(h)
884
881
885 def write(self, fp):
882 def write(self, fp):
886 fp.write(''.join(self.header))
883 fp.write(''.join(self.header))
887
884
888 def allhunks(self):
885 def allhunks(self):
889 return any(self.allhunks_re.match(h) for h in self.header)
886 return any(self.allhunks_re.match(h) for h in self.header)
890
887
891 def files(self):
888 def files(self):
892 match = self.diffgit_re.match(self.header[0])
889 match = self.diffgit_re.match(self.header[0])
893 if match:
890 if match:
894 fromfile, tofile = match.groups()
891 fromfile, tofile = match.groups()
895 if fromfile == tofile:
892 if fromfile == tofile:
896 return [fromfile]
893 return [fromfile]
897 return [fromfile, tofile]
894 return [fromfile, tofile]
898 else:
895 else:
899 return self.diff_re.match(self.header[0]).groups()
896 return self.diff_re.match(self.header[0]).groups()
900
897
901 def filename(self):
898 def filename(self):
902 return self.files()[-1]
899 return self.files()[-1]
903
900
904 def __repr__(self):
901 def __repr__(self):
905 return '<header %s>' % (' '.join(map(repr, self.files())))
902 return '<header %s>' % (' '.join(map(repr, self.files())))
906
903
907 def isnewfile(self):
904 def isnewfile(self):
908 return any(self.newfile_re.match(h) for h in self.header)
905 return any(self.newfile_re.match(h) for h in self.header)
909
906
910 def special(self):
907 def special(self):
911 # Special files are shown only at the header level and not at the hunk
908 # Special files are shown only at the header level and not at the hunk
912 # level for example a file that has been deleted is a special file.
909 # level for example a file that has been deleted is a special file.
913 # The user cannot change the content of the operation, in the case of
910 # The user cannot change the content of the operation, in the case of
914 # the deleted file he has to take the deletion or not take it, he
911 # the deleted file he has to take the deletion or not take it, he
915 # cannot take some of it.
912 # cannot take some of it.
916 # Newly added files are special if they are empty, they are not special
913 # Newly added files are special if they are empty, they are not special
917 # if they have some content as we want to be able to change it
914 # if they have some content as we want to be able to change it
918 nocontent = len(self.header) == 2
915 nocontent = len(self.header) == 2
919 emptynewfile = self.isnewfile() and nocontent
916 emptynewfile = self.isnewfile() and nocontent
920 return emptynewfile or \
917 return emptynewfile or \
921 any(self.special_re.match(h) for h in self.header)
918 any(self.special_re.match(h) for h in self.header)
922
919
923 class recordhunk(object):
920 class recordhunk(object):
924 """patch hunk
921 """patch hunk
925
922
926 XXX shouldn't we merge this with the other hunk class?
923 XXX shouldn't we merge this with the other hunk class?
927 """
924 """
928 maxcontext = 3
925 maxcontext = 3
929
926
930 def __init__(self, header, fromline, toline, proc, before, hunk, after):
927 def __init__(self, header, fromline, toline, proc, before, hunk, after):
931 def trimcontext(number, lines):
928 def trimcontext(number, lines):
932 delta = len(lines) - self.maxcontext
929 delta = len(lines) - self.maxcontext
933 if False and delta > 0:
930 if False and delta > 0:
934 return number + delta, lines[:self.maxcontext]
931 return number + delta, lines[:self.maxcontext]
935 return number, lines
932 return number, lines
936
933
937 self.header = header
934 self.header = header
938 self.fromline, self.before = trimcontext(fromline, before)
935 self.fromline, self.before = trimcontext(fromline, before)
939 self.toline, self.after = trimcontext(toline, after)
936 self.toline, self.after = trimcontext(toline, after)
940 self.proc = proc
937 self.proc = proc
941 self.hunk = hunk
938 self.hunk = hunk
942 self.added, self.removed = self.countchanges(self.hunk)
939 self.added, self.removed = self.countchanges(self.hunk)
943
940
944 def __eq__(self, v):
941 def __eq__(self, v):
945 if not isinstance(v, recordhunk):
942 if not isinstance(v, recordhunk):
946 return False
943 return False
947
944
948 return ((v.hunk == self.hunk) and
945 return ((v.hunk == self.hunk) and
949 (v.proc == self.proc) and
946 (v.proc == self.proc) and
950 (self.fromline == v.fromline) and
947 (self.fromline == v.fromline) and
951 (self.header.files() == v.header.files()))
948 (self.header.files() == v.header.files()))
952
949
953 def __hash__(self):
950 def __hash__(self):
954 return hash((tuple(self.hunk),
951 return hash((tuple(self.hunk),
955 tuple(self.header.files()),
952 tuple(self.header.files()),
956 self.fromline,
953 self.fromline,
957 self.proc))
954 self.proc))
958
955
959 def countchanges(self, hunk):
956 def countchanges(self, hunk):
960 """hunk -> (n+,n-)"""
957 """hunk -> (n+,n-)"""
961 add = len([h for h in hunk if h[0] == '+'])
958 add = len([h for h in hunk if h[0] == '+'])
962 rem = len([h for h in hunk if h[0] == '-'])
959 rem = len([h for h in hunk if h[0] == '-'])
963 return add, rem
960 return add, rem
964
961
965 def reversehunk(self):
962 def reversehunk(self):
966 """return another recordhunk which is the reverse of the hunk
963 """return another recordhunk which is the reverse of the hunk
967
964
968 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
965 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
969 that, swap fromline/toline and +/- signs while keep other things
966 that, swap fromline/toline and +/- signs while keep other things
970 unchanged.
967 unchanged.
971 """
968 """
972 m = {'+': '-', '-': '+'}
969 m = {'+': '-', '-': '+'}
973 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
970 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
974 return recordhunk(self.header, self.toline, self.fromline, self.proc,
971 return recordhunk(self.header, self.toline, self.fromline, self.proc,
975 self.before, hunk, self.after)
972 self.before, hunk, self.after)
976
973
977 def write(self, fp):
974 def write(self, fp):
978 delta = len(self.before) + len(self.after)
975 delta = len(self.before) + len(self.after)
979 if self.after and self.after[-1] == '\\ No newline at end of file\n':
976 if self.after and self.after[-1] == '\\ No newline at end of file\n':
980 delta -= 1
977 delta -= 1
981 fromlen = delta + self.removed
978 fromlen = delta + self.removed
982 tolen = delta + self.added
979 tolen = delta + self.added
983 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
980 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
984 (self.fromline, fromlen, self.toline, tolen,
981 (self.fromline, fromlen, self.toline, tolen,
985 self.proc and (' ' + self.proc)))
982 self.proc and (' ' + self.proc)))
986 fp.write(''.join(self.before + self.hunk + self.after))
983 fp.write(''.join(self.before + self.hunk + self.after))
987
984
988 pretty = write
985 pretty = write
989
986
990 def filename(self):
987 def filename(self):
991 return self.header.filename()
988 return self.header.filename()
992
989
993 def __repr__(self):
990 def __repr__(self):
994 return '<hunk %r@%d>' % (self.filename(), self.fromline)
991 return '<hunk %r@%d>' % (self.filename(), self.fromline)
995
992
996 def filterpatch(ui, headers, operation=None):
993 def filterpatch(ui, headers, operation=None):
997 """Interactively filter patch chunks into applied-only chunks"""
994 """Interactively filter patch chunks into applied-only chunks"""
998 if operation is None:
995 if operation is None:
999 operation = 'record'
996 operation = 'record'
1000 messages = {
997 messages = {
1001 'multiple': {
998 'multiple': {
1002 'discard': _("discard change %d/%d to '%s'?"),
999 'discard': _("discard change %d/%d to '%s'?"),
1003 'record': _("record change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1004 'revert': _("revert change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1005 }[operation],
1002 }[operation],
1006 'single': {
1003 'single': {
1007 'discard': _("discard this change to '%s'?"),
1004 'discard': _("discard this change to '%s'?"),
1008 'record': _("record this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1009 'revert': _("revert this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1010 }[operation],
1007 }[operation],
1011 'help': {
1008 'help': {
1012 'discard': _('[Ynesfdaq?]'
1009 'discard': _('[Ynesfdaq?]'
1013 '$$ &Yes, discard this change'
1010 '$$ &Yes, discard this change'
1014 '$$ &No, skip this change'
1011 '$$ &No, skip this change'
1015 '$$ &Edit this change manually'
1012 '$$ &Edit this change manually'
1016 '$$ &Skip remaining changes to this file'
1013 '$$ &Skip remaining changes to this file'
1017 '$$ Discard remaining changes to this &file'
1014 '$$ Discard remaining changes to this &file'
1018 '$$ &Done, skip remaining changes and files'
1015 '$$ &Done, skip remaining changes and files'
1019 '$$ Discard &all changes to all remaining files'
1016 '$$ Discard &all changes to all remaining files'
1020 '$$ &Quit, discarding no changes'
1017 '$$ &Quit, discarding no changes'
1021 '$$ &? (display help)'),
1018 '$$ &? (display help)'),
1022 'record': _('[Ynesfdaq?]'
1019 'record': _('[Ynesfdaq?]'
1023 '$$ &Yes, record this change'
1020 '$$ &Yes, record this change'
1024 '$$ &No, skip this change'
1021 '$$ &No, skip this change'
1025 '$$ &Edit this change manually'
1022 '$$ &Edit this change manually'
1026 '$$ &Skip remaining changes to this file'
1023 '$$ &Skip remaining changes to this file'
1027 '$$ Record remaining changes to this &file'
1024 '$$ Record remaining changes to this &file'
1028 '$$ &Done, skip remaining changes and files'
1025 '$$ &Done, skip remaining changes and files'
1029 '$$ Record &all changes to all remaining files'
1026 '$$ Record &all changes to all remaining files'
1030 '$$ &Quit, recording no changes'
1027 '$$ &Quit, recording no changes'
1031 '$$ &? (display help)'),
1028 '$$ &? (display help)'),
1032 'revert': _('[Ynesfdaq?]'
1029 'revert': _('[Ynesfdaq?]'
1033 '$$ &Yes, revert this change'
1030 '$$ &Yes, revert this change'
1034 '$$ &No, skip this change'
1031 '$$ &No, skip this change'
1035 '$$ &Edit this change manually'
1032 '$$ &Edit this change manually'
1036 '$$ &Skip remaining changes to this file'
1033 '$$ &Skip remaining changes to this file'
1037 '$$ Revert remaining changes to this &file'
1034 '$$ Revert remaining changes to this &file'
1038 '$$ &Done, skip remaining changes and files'
1035 '$$ &Done, skip remaining changes and files'
1039 '$$ Revert &all changes to all remaining files'
1036 '$$ Revert &all changes to all remaining files'
1040 '$$ &Quit, reverting no changes'
1037 '$$ &Quit, reverting no changes'
1041 '$$ &? (display help)')
1038 '$$ &? (display help)')
1042 }[operation]
1039 }[operation]
1043 }
1040 }
1044
1041
1045 def prompt(skipfile, skipall, query, chunk):
1042 def prompt(skipfile, skipall, query, chunk):
1046 """prompt query, and process base inputs
1043 """prompt query, and process base inputs
1047
1044
1048 - y/n for the rest of file
1045 - y/n for the rest of file
1049 - y/n for the rest
1046 - y/n for the rest
1050 - ? (help)
1047 - ? (help)
1051 - q (quit)
1048 - q (quit)
1052
1049
1053 Return True/False and possibly updated skipfile and skipall.
1050 Return True/False and possibly updated skipfile and skipall.
1054 """
1051 """
1055 newpatches = None
1052 newpatches = None
1056 if skipall is not None:
1053 if skipall is not None:
1057 return skipall, skipfile, skipall, newpatches
1054 return skipall, skipfile, skipall, newpatches
1058 if skipfile is not None:
1055 if skipfile is not None:
1059 return skipfile, skipfile, skipall, newpatches
1056 return skipfile, skipfile, skipall, newpatches
1060 while True:
1057 while True:
1061 resps = messages['help']
1058 resps = messages['help']
1062 r = ui.promptchoice("%s %s" % (query, resps))
1059 r = ui.promptchoice("%s %s" % (query, resps))
1063 ui.write("\n")
1060 ui.write("\n")
1064 if r == 8: # ?
1061 if r == 8: # ?
1065 for c, t in ui.extractchoices(resps)[1]:
1062 for c, t in ui.extractchoices(resps)[1]:
1066 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1063 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1067 continue
1064 continue
1068 elif r == 0: # yes
1065 elif r == 0: # yes
1069 ret = True
1066 ret = True
1070 elif r == 1: # no
1067 elif r == 1: # no
1071 ret = False
1068 ret = False
1072 elif r == 2: # Edit patch
1069 elif r == 2: # Edit patch
1073 if chunk is None:
1070 if chunk is None:
1074 ui.write(_('cannot edit patch for whole file'))
1071 ui.write(_('cannot edit patch for whole file'))
1075 ui.write("\n")
1072 ui.write("\n")
1076 continue
1073 continue
1077 if chunk.header.binary():
1074 if chunk.header.binary():
1078 ui.write(_('cannot edit patch for binary file'))
1075 ui.write(_('cannot edit patch for binary file'))
1079 ui.write("\n")
1076 ui.write("\n")
1080 continue
1077 continue
1081 # Patch comment based on the Git one (based on comment at end of
1078 # Patch comment based on the Git one (based on comment at end of
1082 # https://mercurial-scm.org/wiki/RecordExtension)
1079 # https://mercurial-scm.org/wiki/RecordExtension)
1083 phelp = '---' + _("""
1080 phelp = '---' + _("""
1084 To remove '-' lines, make them ' ' lines (context).
1081 To remove '-' lines, make them ' ' lines (context).
1085 To remove '+' lines, delete them.
1082 To remove '+' lines, delete them.
1086 Lines starting with # will be removed from the patch.
1083 Lines starting with # will be removed from the patch.
1087
1084
1088 If the patch applies cleanly, the edited hunk will immediately be
1085 If the patch applies cleanly, the edited hunk will immediately be
1089 added to the record list. If it does not apply cleanly, a rejects
1086 added to the record list. If it does not apply cleanly, a rejects
1090 file will be generated: you can use that when you try again. If
1087 file will be generated: you can use that when you try again. If
1091 all lines of the hunk are removed, then the edit is aborted and
1088 all lines of the hunk are removed, then the edit is aborted and
1092 the hunk is left unchanged.
1089 the hunk is left unchanged.
1093 """)
1090 """)
1094 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1091 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1095 suffix=".diff", text=True)
1092 suffix=".diff", text=True)
1096 ncpatchfp = None
1093 ncpatchfp = None
1097 try:
1094 try:
1098 # Write the initial patch
1095 # Write the initial patch
1099 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1096 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1100 chunk.header.write(f)
1097 chunk.header.write(f)
1101 chunk.write(f)
1098 chunk.write(f)
1102 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1099 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1103 f.close()
1100 f.close()
1104 # Start the editor and wait for it to complete
1101 # Start the editor and wait for it to complete
1105 editor = ui.geteditor()
1102 editor = ui.geteditor()
1106 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1103 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1107 environ={'HGUSER': ui.username()},
1104 environ={'HGUSER': ui.username()},
1108 blockedtag='filterpatch')
1105 blockedtag='filterpatch')
1109 if ret != 0:
1106 if ret != 0:
1110 ui.warn(_("editor exited with exit code %d\n") % ret)
1107 ui.warn(_("editor exited with exit code %d\n") % ret)
1111 continue
1108 continue
1112 # Remove comment lines
1109 # Remove comment lines
1113 patchfp = open(patchfn)
1110 patchfp = open(patchfn)
1114 ncpatchfp = stringio()
1111 ncpatchfp = stringio()
1115 for line in util.iterfile(patchfp):
1112 for line in util.iterfile(patchfp):
1116 if not line.startswith('#'):
1113 if not line.startswith('#'):
1117 ncpatchfp.write(line)
1114 ncpatchfp.write(line)
1118 patchfp.close()
1115 patchfp.close()
1119 ncpatchfp.seek(0)
1116 ncpatchfp.seek(0)
1120 newpatches = parsepatch(ncpatchfp)
1117 newpatches = parsepatch(ncpatchfp)
1121 finally:
1118 finally:
1122 os.unlink(patchfn)
1119 os.unlink(patchfn)
1123 del ncpatchfp
1120 del ncpatchfp
1124 # Signal that the chunk shouldn't be applied as-is, but
1121 # Signal that the chunk shouldn't be applied as-is, but
1125 # provide the new patch to be used instead.
1122 # provide the new patch to be used instead.
1126 ret = False
1123 ret = False
1127 elif r == 3: # Skip
1124 elif r == 3: # Skip
1128 ret = skipfile = False
1125 ret = skipfile = False
1129 elif r == 4: # file (Record remaining)
1126 elif r == 4: # file (Record remaining)
1130 ret = skipfile = True
1127 ret = skipfile = True
1131 elif r == 5: # done, skip remaining
1128 elif r == 5: # done, skip remaining
1132 ret = skipall = False
1129 ret = skipall = False
1133 elif r == 6: # all
1130 elif r == 6: # all
1134 ret = skipall = True
1131 ret = skipall = True
1135 elif r == 7: # quit
1132 elif r == 7: # quit
1136 raise error.Abort(_('user quit'))
1133 raise error.Abort(_('user quit'))
1137 return ret, skipfile, skipall, newpatches
1134 return ret, skipfile, skipall, newpatches
1138
1135
1139 seen = set()
1136 seen = set()
1140 applied = {} # 'filename' -> [] of chunks
1137 applied = {} # 'filename' -> [] of chunks
1141 skipfile, skipall = None, None
1138 skipfile, skipall = None, None
1142 pos, total = 1, sum(len(h.hunks) for h in headers)
1139 pos, total = 1, sum(len(h.hunks) for h in headers)
1143 for h in headers:
1140 for h in headers:
1144 pos += len(h.hunks)
1141 pos += len(h.hunks)
1145 skipfile = None
1142 skipfile = None
1146 fixoffset = 0
1143 fixoffset = 0
1147 hdr = ''.join(h.header)
1144 hdr = ''.join(h.header)
1148 if hdr in seen:
1145 if hdr in seen:
1149 continue
1146 continue
1150 seen.add(hdr)
1147 seen.add(hdr)
1151 if skipall is None:
1148 if skipall is None:
1152 h.pretty(ui)
1149 h.pretty(ui)
1153 msg = (_('examine changes to %s?') %
1150 msg = (_('examine changes to %s?') %
1154 _(' and ').join("'%s'" % f for f in h.files()))
1151 _(' and ').join("'%s'" % f for f in h.files()))
1155 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1152 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1156 if not r:
1153 if not r:
1157 continue
1154 continue
1158 applied[h.filename()] = [h]
1155 applied[h.filename()] = [h]
1159 if h.allhunks():
1156 if h.allhunks():
1160 applied[h.filename()] += h.hunks
1157 applied[h.filename()] += h.hunks
1161 continue
1158 continue
1162 for i, chunk in enumerate(h.hunks):
1159 for i, chunk in enumerate(h.hunks):
1163 if skipfile is None and skipall is None:
1160 if skipfile is None and skipall is None:
1164 chunk.pretty(ui)
1161 chunk.pretty(ui)
1165 if total == 1:
1162 if total == 1:
1166 msg = messages['single'] % chunk.filename()
1163 msg = messages['single'] % chunk.filename()
1167 else:
1164 else:
1168 idx = pos - len(h.hunks) + i
1165 idx = pos - len(h.hunks) + i
1169 msg = messages['multiple'] % (idx, total, chunk.filename())
1166 msg = messages['multiple'] % (idx, total, chunk.filename())
1170 r, skipfile, skipall, newpatches = prompt(skipfile,
1167 r, skipfile, skipall, newpatches = prompt(skipfile,
1171 skipall, msg, chunk)
1168 skipall, msg, chunk)
1172 if r:
1169 if r:
1173 if fixoffset:
1170 if fixoffset:
1174 chunk = copy.copy(chunk)
1171 chunk = copy.copy(chunk)
1175 chunk.toline += fixoffset
1172 chunk.toline += fixoffset
1176 applied[chunk.filename()].append(chunk)
1173 applied[chunk.filename()].append(chunk)
1177 elif newpatches is not None:
1174 elif newpatches is not None:
1178 for newpatch in newpatches:
1175 for newpatch in newpatches:
1179 for newhunk in newpatch.hunks:
1176 for newhunk in newpatch.hunks:
1180 if fixoffset:
1177 if fixoffset:
1181 newhunk.toline += fixoffset
1178 newhunk.toline += fixoffset
1182 applied[newhunk.filename()].append(newhunk)
1179 applied[newhunk.filename()].append(newhunk)
1183 else:
1180 else:
1184 fixoffset += chunk.removed - chunk.added
1181 fixoffset += chunk.removed - chunk.added
1185 return (sum([h for h in applied.itervalues()
1182 return (sum([h for h in applied.itervalues()
1186 if h[0].special() or len(h) > 1], []), {})
1183 if h[0].special() or len(h) > 1], []), {})
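# Rough usage sketch (an assumption for illustration, not code from this
# module): interactive commands typically feed parsepatch() output straight
# into filterpatch(), e.g.
#   chunks, opts = filterpatch(ui, parsepatch(originaldiff), operation='revert')
# and then re-apply or reverse the returned chunks.
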
1187 class hunk(object):
1184 class hunk(object):
1188 def __init__(self, desc, num, lr, context):
1185 def __init__(self, desc, num, lr, context):
1189 self.number = num
1186 self.number = num
1190 self.desc = desc
1187 self.desc = desc
1191 self.hunk = [desc]
1188 self.hunk = [desc]
1192 self.a = []
1189 self.a = []
1193 self.b = []
1190 self.b = []
1194 self.starta = self.lena = None
1191 self.starta = self.lena = None
1195 self.startb = self.lenb = None
1192 self.startb = self.lenb = None
1196 if lr is not None:
1193 if lr is not None:
1197 if context:
1194 if context:
1198 self.read_context_hunk(lr)
1195 self.read_context_hunk(lr)
1199 else:
1196 else:
1200 self.read_unified_hunk(lr)
1197 self.read_unified_hunk(lr)
1201
1198
1202 def getnormalized(self):
1199 def getnormalized(self):
1203 """Return a copy with line endings normalized to LF."""
1200 """Return a copy with line endings normalized to LF."""
1204
1201
1205 def normalize(lines):
1202 def normalize(lines):
1206 nlines = []
1203 nlines = []
1207 for line in lines:
1204 for line in lines:
1208 if line.endswith('\r\n'):
1205 if line.endswith('\r\n'):
1209 line = line[:-2] + '\n'
1206 line = line[:-2] + '\n'
1210 nlines.append(line)
1207 nlines.append(line)
1211 return nlines
1208 return nlines
1212
1209
1213 # Dummy object; it is rebuilt manually below
1210 # Dummy object; it is rebuilt manually below
1214 nh = hunk(self.desc, self.number, None, None)
1211 nh = hunk(self.desc, self.number, None, None)
1215 nh.number = self.number
1212 nh.number = self.number
1216 nh.desc = self.desc
1213 nh.desc = self.desc
1217 nh.hunk = self.hunk
1214 nh.hunk = self.hunk
1218 nh.a = normalize(self.a)
1215 nh.a = normalize(self.a)
1219 nh.b = normalize(self.b)
1216 nh.b = normalize(self.b)
1220 nh.starta = self.starta
1217 nh.starta = self.starta
1221 nh.startb = self.startb
1218 nh.startb = self.startb
1222 nh.lena = self.lena
1219 nh.lena = self.lena
1223 nh.lenb = self.lenb
1220 nh.lenb = self.lenb
1224 return nh
1221 return nh
1225
1222
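# Illustrative note (added): normalization only rewrites CRLF endings, e.g.
# 'foo\r\n' becomes 'foo\n'; lone '\r' endings and already-normalized lines
# are kept as they are.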
1226 def read_unified_hunk(self, lr):
1223 def read_unified_hunk(self, lr):
1227 m = unidesc.match(self.desc)
1224 m = unidesc.match(self.desc)
1228 if not m:
1225 if not m:
1229 raise PatchError(_("bad hunk #%d") % self.number)
1226 raise PatchError(_("bad hunk #%d") % self.number)
1230 self.starta, self.lena, self.startb, self.lenb = m.groups()
1227 self.starta, self.lena, self.startb, self.lenb = m.groups()
1231 if self.lena is None:
1228 if self.lena is None:
1232 self.lena = 1
1229 self.lena = 1
1233 else:
1230 else:
1234 self.lena = int(self.lena)
1231 self.lena = int(self.lena)
1235 if self.lenb is None:
1232 if self.lenb is None:
1236 self.lenb = 1
1233 self.lenb = 1
1237 else:
1234 else:
1238 self.lenb = int(self.lenb)
1235 self.lenb = int(self.lenb)
1239 self.starta = int(self.starta)
1236 self.starta = int(self.starta)
1240 self.startb = int(self.startb)
1237 self.startb = int(self.startb)
1241 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1238 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1242 self.b)
1239 self.b)
1243 # if we hit EOF before finishing out the hunk, the last line will
1240 # if we hit EOF before finishing out the hunk, the last line will
1244 # be zero length. Let's try to fix it up.
1241 # be zero length. Let's try to fix it up.
1245 while len(self.hunk[-1]) == 0:
1242 while len(self.hunk[-1]) == 0:
1246 del self.hunk[-1]
1243 del self.hunk[-1]
1247 del self.a[-1]
1244 del self.a[-1]
1248 del self.b[-1]
1245 del self.b[-1]
1249 self.lena -= 1
1246 self.lena -= 1
1250 self.lenb -= 1
1247 self.lenb -= 1
1251 self._fixnewline(lr)
1248 self._fixnewline(lr)
1252
1249
1253 def read_context_hunk(self, lr):
1250 def read_context_hunk(self, lr):
1254 self.desc = lr.readline()
1251 self.desc = lr.readline()
1255 m = contextdesc.match(self.desc)
1252 m = contextdesc.match(self.desc)
1256 if not m:
1253 if not m:
1257 raise PatchError(_("bad hunk #%d") % self.number)
1254 raise PatchError(_("bad hunk #%d") % self.number)
1258 self.starta, aend = m.groups()
1255 self.starta, aend = m.groups()
1259 self.starta = int(self.starta)
1256 self.starta = int(self.starta)
1260 if aend is None:
1257 if aend is None:
1261 aend = self.starta
1258 aend = self.starta
1262 self.lena = int(aend) - self.starta
1259 self.lena = int(aend) - self.starta
1263 if self.starta:
1260 if self.starta:
1264 self.lena += 1
1261 self.lena += 1
1265 for x in xrange(self.lena):
1262 for x in xrange(self.lena):
1266 l = lr.readline()
1263 l = lr.readline()
1267 if l.startswith('---'):
1264 if l.startswith('---'):
1268 # lines addition, old block is empty
1265 # lines addition, old block is empty
1269 lr.push(l)
1266 lr.push(l)
1270 break
1267 break
1271 s = l[2:]
1268 s = l[2:]
1272 if l.startswith('- ') or l.startswith('! '):
1269 if l.startswith('- ') or l.startswith('! '):
1273 u = '-' + s
1270 u = '-' + s
1274 elif l.startswith(' '):
1271 elif l.startswith(' '):
1275 u = ' ' + s
1272 u = ' ' + s
1276 else:
1273 else:
1277 raise PatchError(_("bad hunk #%d old text line %d") %
1274 raise PatchError(_("bad hunk #%d old text line %d") %
1278 (self.number, x))
1275 (self.number, x))
1279 self.a.append(u)
1276 self.a.append(u)
1280 self.hunk.append(u)
1277 self.hunk.append(u)
1281
1278
1282 l = lr.readline()
1279 l = lr.readline()
1283 if l.startswith('\ '):
1280 if l.startswith('\ '):
1284 s = self.a[-1][:-1]
1281 s = self.a[-1][:-1]
1285 self.a[-1] = s
1282 self.a[-1] = s
1286 self.hunk[-1] = s
1283 self.hunk[-1] = s
1287 l = lr.readline()
1284 l = lr.readline()
1288 m = contextdesc.match(l)
1285 m = contextdesc.match(l)
1289 if not m:
1286 if not m:
1290 raise PatchError(_("bad hunk #%d") % self.number)
1287 raise PatchError(_("bad hunk #%d") % self.number)
1291 self.startb, bend = m.groups()
1288 self.startb, bend = m.groups()
1292 self.startb = int(self.startb)
1289 self.startb = int(self.startb)
1293 if bend is None:
1290 if bend is None:
1294 bend = self.startb
1291 bend = self.startb
1295 self.lenb = int(bend) - self.startb
1292 self.lenb = int(bend) - self.startb
1296 if self.startb:
1293 if self.startb:
1297 self.lenb += 1
1294 self.lenb += 1
1298 hunki = 1
1295 hunki = 1
1299 for x in xrange(self.lenb):
1296 for x in xrange(self.lenb):
1300 l = lr.readline()
1297 l = lr.readline()
1301 if l.startswith('\ '):
1298 if l.startswith('\ '):
1302 # XXX: the only way to hit this is with an invalid line range.
1299 # XXX: the only way to hit this is with an invalid line range.
1303 # The no-eol marker is not counted in the line range, but I
1300 # The no-eol marker is not counted in the line range, but I
1304 # guess some diff(1) implementations behave differently.
1301 # guess some diff(1) implementations behave differently.
1305 s = self.b[-1][:-1]
1302 s = self.b[-1][:-1]
1306 self.b[-1] = s
1303 self.b[-1] = s
1307 self.hunk[hunki - 1] = s
1304 self.hunk[hunki - 1] = s
1308 continue
1305 continue
1309 if not l:
1306 if not l:
1310 # line deletions, new block is empty and we hit EOF
1307 # line deletions, new block is empty and we hit EOF
1311 lr.push(l)
1308 lr.push(l)
1312 break
1309 break
1313 s = l[2:]
1310 s = l[2:]
1314 if l.startswith('+ ') or l.startswith('! '):
1311 if l.startswith('+ ') or l.startswith('! '):
1315 u = '+' + s
1312 u = '+' + s
1316 elif l.startswith(' '):
1313 elif l.startswith(' '):
1317 u = ' ' + s
1314 u = ' ' + s
1318 elif len(self.b) == 0:
1315 elif len(self.b) == 0:
1319 # line deletions, new block is empty
1316 # line deletions, new block is empty
1320 lr.push(l)
1317 lr.push(l)
1321 break
1318 break
1322 else:
1319 else:
1323 raise PatchError(_("bad hunk #%d old text line %d") %
1320 raise PatchError(_("bad hunk #%d old text line %d") %
1324 (self.number, x))
1321 (self.number, x))
1325 self.b.append(s)
1322 self.b.append(s)
1326 while True:
1323 while True:
1327 if hunki >= len(self.hunk):
1324 if hunki >= len(self.hunk):
1328 h = ""
1325 h = ""
1329 else:
1326 else:
1330 h = self.hunk[hunki]
1327 h = self.hunk[hunki]
1331 hunki += 1
1328 hunki += 1
1332 if h == u:
1329 if h == u:
1333 break
1330 break
1334 elif h.startswith('-'):
1331 elif h.startswith('-'):
1335 continue
1332 continue
1336 else:
1333 else:
1337 self.hunk.insert(hunki - 1, u)
1334 self.hunk.insert(hunki - 1, u)
1338 break
1335 break
1339
1336
1340 if not self.a:
1337 if not self.a:
1341 # this happens when lines were only added to the hunk
1338 # this happens when lines were only added to the hunk
1342 for x in self.hunk:
1339 for x in self.hunk:
1343 if x.startswith('-') or x.startswith(' '):
1340 if x.startswith('-') or x.startswith(' '):
1344 self.a.append(x)
1341 self.a.append(x)
1345 if not self.b:
1342 if not self.b:
1346 # this happens when lines were only deleted from the hunk
1343 # this happens when lines were only deleted from the hunk
1347 for x in self.hunk:
1344 for x in self.hunk:
1348 if x.startswith('+') or x.startswith(' '):
1345 if x.startswith('+') or x.startswith(' '):
1349 self.b.append(x[1:])
1346 self.b.append(x[1:])
1350 # @@ -start,len +start,len @@
1347 # @@ -start,len +start,len @@
1351 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1348 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1352 self.startb, self.lenb)
1349 self.startb, self.lenb)
1353 self.hunk[0] = self.desc
1350 self.hunk[0] = self.desc
1354 self._fixnewline(lr)
1351 self._fixnewline(lr)
1355
1352
1356 def _fixnewline(self, lr):
1353 def _fixnewline(self, lr):
1357 l = lr.readline()
1354 l = lr.readline()
1358 if l.startswith('\ '):
1355 if l.startswith('\ '):
1359 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1356 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1360 else:
1357 else:
1361 lr.push(l)
1358 lr.push(l)
1362
1359
1363 def complete(self):
1360 def complete(self):
1364 return len(self.a) == self.lena and len(self.b) == self.lenb
1361 return len(self.a) == self.lena and len(self.b) == self.lenb
1365
1362
1366 def _fuzzit(self, old, new, fuzz, toponly):
1363 def _fuzzit(self, old, new, fuzz, toponly):
1367 # this removes context lines from the top and bottom of the 'old' and
1364 # this removes context lines from the top and bottom of the 'old' and
1368 # 'new' lists. It checks the hunk to make sure only context lines are
1365 # 'new' lists. It checks the hunk to make sure only context lines are
1369 # removed, and then returns the shortened lists of lines.
1366 # removed, and then returns the shortened lists of lines.
1370 fuzz = min(fuzz, len(old))
1367 fuzz = min(fuzz, len(old))
1371 if fuzz:
1368 if fuzz:
1372 top = 0
1369 top = 0
1373 bot = 0
1370 bot = 0
1374 hlen = len(self.hunk)
1371 hlen = len(self.hunk)
1375 for x in xrange(hlen - 1):
1372 for x in xrange(hlen - 1):
1376 # the hunk starts with the @@ line, so use x+1
1373 # the hunk starts with the @@ line, so use x+1
1377 if self.hunk[x + 1][0] == ' ':
1374 if self.hunk[x + 1][0] == ' ':
1378 top += 1
1375 top += 1
1379 else:
1376 else:
1380 break
1377 break
1381 if not toponly:
1378 if not toponly:
1382 for x in xrange(hlen - 1):
1379 for x in xrange(hlen - 1):
1383 if self.hunk[hlen - bot - 1][0] == ' ':
1380 if self.hunk[hlen - bot - 1][0] == ' ':
1384 bot += 1
1381 bot += 1
1385 else:
1382 else:
1386 break
1383 break
1387
1384
1388 bot = min(fuzz, bot)
1385 bot = min(fuzz, bot)
1389 top = min(fuzz, top)
1386 top = min(fuzz, top)
1390 return old[top:len(old) - bot], new[top:len(new) - bot], top
1387 return old[top:len(old) - bot], new[top:len(new) - bot], top
1391 return old, new, 0
1388 return old, new, 0
1392
1389
1393 def fuzzit(self, fuzz, toponly):
1390 def fuzzit(self, fuzz, toponly):
1394 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1391 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1395 oldstart = self.starta + top
1392 oldstart = self.starta + top
1396 newstart = self.startb + top
1393 newstart = self.startb + top
1397 # zero length hunk ranges already have their start decremented
1394 # zero length hunk ranges already have their start decremented
1398 if self.lena and oldstart > 0:
1395 if self.lena and oldstart > 0:
1399 oldstart -= 1
1396 oldstart -= 1
1400 if self.lenb and newstart > 0:
1397 if self.lenb and newstart > 0:
1401 newstart -= 1
1398 newstart -= 1
1402 return old, oldstart, new, newstart
1399 return old, oldstart, new, newstart
1403
1400
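# Illustrative note (added): with fuzz=2 and toponly=False, at most two
# leading and two trailing context (' ') lines are trimmed from the hunk; the
# number of lines trimmed from the top is also added to the reported start
# offsets so the shortened hunk still lines up with the target file.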
1404 class binhunk(object):
1401 class binhunk(object):
1405 'A binary patch file.'
1402 'A binary patch file.'
1406 def __init__(self, lr, fname):
1403 def __init__(self, lr, fname):
1407 self.text = None
1404 self.text = None
1408 self.delta = False
1405 self.delta = False
1409 self.hunk = ['GIT binary patch\n']
1406 self.hunk = ['GIT binary patch\n']
1410 self._fname = fname
1407 self._fname = fname
1411 self._read(lr)
1408 self._read(lr)
1412
1409
1413 def complete(self):
1410 def complete(self):
1414 return self.text is not None
1411 return self.text is not None
1415
1412
1416 def new(self, lines):
1413 def new(self, lines):
1417 if self.delta:
1414 if self.delta:
1418 return [applybindelta(self.text, ''.join(lines))]
1415 return [applybindelta(self.text, ''.join(lines))]
1419 return [self.text]
1416 return [self.text]
1420
1417
1421 def _read(self, lr):
1418 def _read(self, lr):
1422 def getline(lr, hunk):
1419 def getline(lr, hunk):
1423 l = lr.readline()
1420 l = lr.readline()
1424 hunk.append(l)
1421 hunk.append(l)
1425 return l.rstrip('\r\n')
1422 return l.rstrip('\r\n')
1426
1423
1427 size = 0
1424 size = 0
1428 while True:
1425 while True:
1429 line = getline(lr, self.hunk)
1426 line = getline(lr, self.hunk)
1430 if not line:
1427 if not line:
1431 raise PatchError(_('could not extract "%s" binary data')
1428 raise PatchError(_('could not extract "%s" binary data')
1432 % self._fname)
1429 % self._fname)
1433 if line.startswith('literal '):
1430 if line.startswith('literal '):
1434 size = int(line[8:].rstrip())
1431 size = int(line[8:].rstrip())
1435 break
1432 break
1436 if line.startswith('delta '):
1433 if line.startswith('delta '):
1437 size = int(line[6:].rstrip())
1434 size = int(line[6:].rstrip())
1438 self.delta = True
1435 self.delta = True
1439 break
1436 break
1440 dec = []
1437 dec = []
1441 line = getline(lr, self.hunk)
1438 line = getline(lr, self.hunk)
1442 while len(line) > 1:
1439 while len(line) > 1:
1443 l = line[0]
1440 l = line[0]
1444 if l <= 'Z' and l >= 'A':
1441 if l <= 'Z' and l >= 'A':
1445 l = ord(l) - ord('A') + 1
1442 l = ord(l) - ord('A') + 1
1446 else:
1443 else:
1447 l = ord(l) - ord('a') + 27
1444 l = ord(l) - ord('a') + 27
1448 try:
1445 try:
1449 dec.append(util.b85decode(line[1:])[:l])
1446 dec.append(util.b85decode(line[1:])[:l])
1450 except ValueError as e:
1447 except ValueError as e:
1451 raise PatchError(_('could not decode "%s" binary patch: %s')
1448 raise PatchError(_('could not decode "%s" binary patch: %s')
1452 % (self._fname, str(e)))
1449 % (self._fname, str(e)))
1453 line = getline(lr, self.hunk)
1450 line = getline(lr, self.hunk)
1454 text = zlib.decompress(''.join(dec))
1451 text = zlib.decompress(''.join(dec))
1455 if len(text) != size:
1452 if len(text) != size:
1456 raise PatchError(_('"%s" length is %d bytes, should be %d')
1453 raise PatchError(_('"%s" length is %d bytes, should be %d')
1457 % (self._fname, len(text), size))
1454 % (self._fname, len(text), size))
1458 self.text = text
1455 self.text = text
1459
1456
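# Illustrative note (added): in "GIT binary patch" hunks each data line starts
# with a length character -- 'A'..'Z' encode 1..26 payload bytes and 'a'..'z'
# encode 27..52 -- followed by base85 data that _read() decodes and truncates
# to that many bytes before zlib-decompressing the whole stream and checking
# it against the declared 'literal'/'delta' size.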
1460 def parsefilename(str):
1457 def parsefilename(str):
1461 # --- filename \t|space stuff
1458 # --- filename \t|space stuff
1462 s = str[4:].rstrip('\r\n')
1459 s = str[4:].rstrip('\r\n')
1463 i = s.find('\t')
1460 i = s.find('\t')
1464 if i < 0:
1461 if i < 0:
1465 i = s.find(' ')
1462 i = s.find(' ')
1466 if i < 0:
1463 if i < 0:
1467 return s
1464 return s
1468 return s[:i]
1465 return s[:i]
1469
1466
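# Illustrative note (added): parsefilename() keeps everything up to the first
# tab or space after the "--- "/"+++ " prefix, e.g.
#   parsefilename('--- a/foo.c\t2017-06-01 10:00:00\n') -> 'a/foo.c'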
1470 def reversehunks(hunks):
1467 def reversehunks(hunks):
1471 '''reverse the signs in the hunks given as argument
1468 '''reverse the signs in the hunks given as argument
1472
1469
1473 This function operates on hunks coming out of patch.filterpatch, that is
1470 This function operates on hunks coming out of patch.filterpatch, that is
1474 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1471 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1475
1472
1476 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1473 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1477 ... --- a/folder1/g
1474 ... --- a/folder1/g
1478 ... +++ b/folder1/g
1475 ... +++ b/folder1/g
1479 ... @@ -1,7 +1,7 @@
1476 ... @@ -1,7 +1,7 @@
1480 ... +firstline
1477 ... +firstline
1481 ... c
1478 ... c
1482 ... 1
1479 ... 1
1483 ... 2
1480 ... 2
1484 ... + 3
1481 ... + 3
1485 ... -4
1482 ... -4
1486 ... 5
1483 ... 5
1487 ... d
1484 ... d
1488 ... +lastline"""
1485 ... +lastline"""
1489 >>> hunks = parsepatch(rawpatch)
1486 >>> hunks = parsepatch(rawpatch)
1490 >>> hunkscomingfromfilterpatch = []
1487 >>> hunkscomingfromfilterpatch = []
1491 >>> for h in hunks:
1488 >>> for h in hunks:
1492 ... hunkscomingfromfilterpatch.append(h)
1489 ... hunkscomingfromfilterpatch.append(h)
1493 ... hunkscomingfromfilterpatch.extend(h.hunks)
1490 ... hunkscomingfromfilterpatch.extend(h.hunks)
1494
1491
1495 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1492 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1496 >>> from . import util
1493 >>> from . import util
1497 >>> fp = util.stringio()
1494 >>> fp = util.stringio()
1498 >>> for c in reversedhunks:
1495 >>> for c in reversedhunks:
1499 ... c.write(fp)
1496 ... c.write(fp)
1500 >>> fp.seek(0)
1497 >>> fp.seek(0)
1501 >>> reversedpatch = fp.read()
1498 >>> reversedpatch = fp.read()
1502 >>> print reversedpatch
1499 >>> print reversedpatch
1503 diff --git a/folder1/g b/folder1/g
1500 diff --git a/folder1/g b/folder1/g
1504 --- a/folder1/g
1501 --- a/folder1/g
1505 +++ b/folder1/g
1502 +++ b/folder1/g
1506 @@ -1,4 +1,3 @@
1503 @@ -1,4 +1,3 @@
1507 -firstline
1504 -firstline
1508 c
1505 c
1509 1
1506 1
1510 2
1507 2
1511 @@ -2,6 +1,6 @@
1508 @@ -2,6 +1,6 @@
1512 c
1509 c
1513 1
1510 1
1514 2
1511 2
1515 - 3
1512 - 3
1516 +4
1513 +4
1517 5
1514 5
1518 d
1515 d
1519 @@ -6,3 +5,2 @@
1516 @@ -6,3 +5,2 @@
1520 5
1517 5
1521 d
1518 d
1522 -lastline
1519 -lastline
1523
1520
1524 '''
1521 '''
1525
1522
1526 newhunks = []
1523 newhunks = []
1527 for c in hunks:
1524 for c in hunks:
1528 if util.safehasattr(c, 'reversehunk'):
1525 if util.safehasattr(c, 'reversehunk'):
1529 c = c.reversehunk()
1526 c = c.reversehunk()
1530 newhunks.append(c)
1527 newhunks.append(c)
1531 return newhunks
1528 return newhunks
1532
1529
1533 def parsepatch(originalchunks):
1530 def parsepatch(originalchunks):
1534 """patch -> [] of headers -> [] of hunks """
1531 """patch -> [] of headers -> [] of hunks """
1535 class parser(object):
1532 class parser(object):
1536 """patch parsing state machine"""
1533 """patch parsing state machine"""
1537 def __init__(self):
1534 def __init__(self):
1538 self.fromline = 0
1535 self.fromline = 0
1539 self.toline = 0
1536 self.toline = 0
1540 self.proc = ''
1537 self.proc = ''
1541 self.header = None
1538 self.header = None
1542 self.context = []
1539 self.context = []
1543 self.before = []
1540 self.before = []
1544 self.hunk = []
1541 self.hunk = []
1545 self.headers = []
1542 self.headers = []
1546
1543
1547 def addrange(self, limits):
1544 def addrange(self, limits):
1548 fromstart, fromend, tostart, toend, proc = limits
1545 fromstart, fromend, tostart, toend, proc = limits
1549 self.fromline = int(fromstart)
1546 self.fromline = int(fromstart)
1550 self.toline = int(tostart)
1547 self.toline = int(tostart)
1551 self.proc = proc
1548 self.proc = proc
1552
1549
1553 def addcontext(self, context):
1550 def addcontext(self, context):
1554 if self.hunk:
1551 if self.hunk:
1555 h = recordhunk(self.header, self.fromline, self.toline,
1552 h = recordhunk(self.header, self.fromline, self.toline,
1556 self.proc, self.before, self.hunk, context)
1553 self.proc, self.before, self.hunk, context)
1557 self.header.hunks.append(h)
1554 self.header.hunks.append(h)
1558 self.fromline += len(self.before) + h.removed
1555 self.fromline += len(self.before) + h.removed
1559 self.toline += len(self.before) + h.added
1556 self.toline += len(self.before) + h.added
1560 self.before = []
1557 self.before = []
1561 self.hunk = []
1558 self.hunk = []
1562 self.context = context
1559 self.context = context
1563
1560
1564 def addhunk(self, hunk):
1561 def addhunk(self, hunk):
1565 if self.context:
1562 if self.context:
1566 self.before = self.context
1563 self.before = self.context
1567 self.context = []
1564 self.context = []
1568 self.hunk = hunk
1565 self.hunk = hunk
1569
1566
1570 def newfile(self, hdr):
1567 def newfile(self, hdr):
1571 self.addcontext([])
1568 self.addcontext([])
1572 h = header(hdr)
1569 h = header(hdr)
1573 self.headers.append(h)
1570 self.headers.append(h)
1574 self.header = h
1571 self.header = h
1575
1572
1576 def addother(self, line):
1573 def addother(self, line):
1577 pass # 'other' lines are ignored
1574 pass # 'other' lines are ignored
1578
1575
1579 def finished(self):
1576 def finished(self):
1580 self.addcontext([])
1577 self.addcontext([])
1581 return self.headers
1578 return self.headers
1582
1579
1583 transitions = {
1580 transitions = {
1584 'file': {'context': addcontext,
1581 'file': {'context': addcontext,
1585 'file': newfile,
1582 'file': newfile,
1586 'hunk': addhunk,
1583 'hunk': addhunk,
1587 'range': addrange},
1584 'range': addrange},
1588 'context': {'file': newfile,
1585 'context': {'file': newfile,
1589 'hunk': addhunk,
1586 'hunk': addhunk,
1590 'range': addrange,
1587 'range': addrange,
1591 'other': addother},
1588 'other': addother},
1592 'hunk': {'context': addcontext,
1589 'hunk': {'context': addcontext,
1593 'file': newfile,
1590 'file': newfile,
1594 'range': addrange},
1591 'range': addrange},
1595 'range': {'context': addcontext,
1592 'range': {'context': addcontext,
1596 'hunk': addhunk},
1593 'hunk': addhunk},
1597 'other': {'other': addother},
1594 'other': {'other': addother},
1598 }
1595 }
1599
1596
1600 p = parser()
1597 p = parser()
1601 fp = stringio()
1598 fp = stringio()
1602 fp.write(''.join(originalchunks))
1599 fp.write(''.join(originalchunks))
1603 fp.seek(0)
1600 fp.seek(0)
1604
1601
1605 state = 'context'
1602 state = 'context'
1606 for newstate, data in scanpatch(fp):
1603 for newstate, data in scanpatch(fp):
1607 try:
1604 try:
1608 p.transitions[state][newstate](p, data)
1605 p.transitions[state][newstate](p, data)
1609 except KeyError:
1606 except KeyError:
1610 raise PatchError('unhandled transition: %s -> %s' %
1607 raise PatchError('unhandled transition: %s -> %s' %
1611 (state, newstate))
1608 (state, newstate))
1612 state = newstate
1609 state = newstate
1613 del fp
1610 del fp
1614 return p.finished()
1611 return p.finished()
1615
1612
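# Illustrative note (added): parsepatch() drives the scanpatch() event stream
# ('file', 'range', 'context', 'hunk', 'other') through the transition table
# above; any event not listed for the current state raises PatchError
# ("unhandled transition"), and finished() flushes the last pending hunk.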
1616 def pathtransform(path, strip, prefix):
1613 def pathtransform(path, strip, prefix):
1617 '''turn a path from a patch into a path suitable for the repository
1614 '''turn a path from a patch into a path suitable for the repository
1618
1615
1619 prefix, if not empty, is expected to be normalized with a / at the end.
1616 prefix, if not empty, is expected to be normalized with a / at the end.
1620
1617
1621 Returns (stripped components, path in repository).
1618 Returns (stripped components, path in repository).
1622
1619
1623 >>> pathtransform('a/b/c', 0, '')
1620 >>> pathtransform('a/b/c', 0, '')
1624 ('', 'a/b/c')
1621 ('', 'a/b/c')
1625 >>> pathtransform(' a/b/c ', 0, '')
1622 >>> pathtransform(' a/b/c ', 0, '')
1626 ('', ' a/b/c')
1623 ('', ' a/b/c')
1627 >>> pathtransform(' a/b/c ', 2, '')
1624 >>> pathtransform(' a/b/c ', 2, '')
1628 ('a/b/', 'c')
1625 ('a/b/', 'c')
1629 >>> pathtransform('a/b/c', 0, 'd/e/')
1626 >>> pathtransform('a/b/c', 0, 'd/e/')
1630 ('', 'd/e/a/b/c')
1627 ('', 'd/e/a/b/c')
1631 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1628 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1632 ('a//b/', 'd/e/c')
1629 ('a//b/', 'd/e/c')
1633 >>> pathtransform('a/b/c', 3, '')
1630 >>> pathtransform('a/b/c', 3, '')
1634 Traceback (most recent call last):
1631 Traceback (most recent call last):
1635 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1632 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1636 '''
1633 '''
1637 pathlen = len(path)
1634 pathlen = len(path)
1638 i = 0
1635 i = 0
1639 if strip == 0:
1636 if strip == 0:
1640 return '', prefix + path.rstrip()
1637 return '', prefix + path.rstrip()
1641 count = strip
1638 count = strip
1642 while count > 0:
1639 while count > 0:
1643 i = path.find('/', i)
1640 i = path.find('/', i)
1644 if i == -1:
1641 if i == -1:
1645 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1642 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1646 (count, strip, path))
1643 (count, strip, path))
1647 i += 1
1644 i += 1
1648 # consume '//' in the path
1645 # consume '//' in the path
1649 while i < pathlen - 1 and path[i] == '/':
1646 while i < pathlen - 1 and path[i] == '/':
1650 i += 1
1647 i += 1
1651 count -= 1
1648 count -= 1
1652 return path[:i].lstrip(), prefix + path[i:].rstrip()
1649 return path[:i].lstrip(), prefix + path[i:].rstrip()
1653
1650
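# Illustrative note (added): the 'strip' argument counts leading path
# components to drop, analogous to patch(1)'s -p option, so strip=1 turns
# 'a/dir/file.c' into 'dir/file.c' before 'prefix' is prepended.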
1654 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1651 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1655 nulla = afile_orig == "/dev/null"
1652 nulla = afile_orig == "/dev/null"
1656 nullb = bfile_orig == "/dev/null"
1653 nullb = bfile_orig == "/dev/null"
1657 create = nulla and hunk.starta == 0 and hunk.lena == 0
1654 create = nulla and hunk.starta == 0 and hunk.lena == 0
1658 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1655 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1659 abase, afile = pathtransform(afile_orig, strip, prefix)
1656 abase, afile = pathtransform(afile_orig, strip, prefix)
1660 gooda = not nulla and backend.exists(afile)
1657 gooda = not nulla and backend.exists(afile)
1661 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1658 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1662 if afile == bfile:
1659 if afile == bfile:
1663 goodb = gooda
1660 goodb = gooda
1664 else:
1661 else:
1665 goodb = not nullb and backend.exists(bfile)
1662 goodb = not nullb and backend.exists(bfile)
1666 missing = not goodb and not gooda and not create
1663 missing = not goodb and not gooda and not create
1667
1664
1668 # some diff programs apparently produce patches where the afile is
1665 # some diff programs apparently produce patches where the afile is
1669 # not /dev/null, but afile starts with bfile
1666 # not /dev/null, but afile starts with bfile
1670 abasedir = afile[:afile.rfind('/') + 1]
1667 abasedir = afile[:afile.rfind('/') + 1]
1671 bbasedir = bfile[:bfile.rfind('/') + 1]
1668 bbasedir = bfile[:bfile.rfind('/') + 1]
1672 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1669 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1673 and hunk.starta == 0 and hunk.lena == 0):
1670 and hunk.starta == 0 and hunk.lena == 0):
1674 create = True
1671 create = True
1675 missing = False
1672 missing = False
1676
1673
1677 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1674 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1678 # diff is between a file and its backup. In this case, the original
1675 # diff is between a file and its backup. In this case, the original
1679 # file should be patched (see original mpatch code).
1676 # file should be patched (see original mpatch code).
1680 isbackup = (abase == bbase and bfile.startswith(afile))
1677 isbackup = (abase == bbase and bfile.startswith(afile))
1681 fname = None
1678 fname = None
1682 if not missing:
1679 if not missing:
1683 if gooda and goodb:
1680 if gooda and goodb:
1684 if isbackup:
1681 if isbackup:
1685 fname = afile
1682 fname = afile
1686 else:
1683 else:
1687 fname = bfile
1684 fname = bfile
1688 elif gooda:
1685 elif gooda:
1689 fname = afile
1686 fname = afile
1690
1687
1691 if not fname:
1688 if not fname:
1692 if not nullb:
1689 if not nullb:
1693 if isbackup:
1690 if isbackup:
1694 fname = afile
1691 fname = afile
1695 else:
1692 else:
1696 fname = bfile
1693 fname = bfile
1697 elif not nulla:
1694 elif not nulla:
1698 fname = afile
1695 fname = afile
1699 else:
1696 else:
1700 raise PatchError(_("undefined source and destination files"))
1697 raise PatchError(_("undefined source and destination files"))
1701
1698
1702 gp = patchmeta(fname)
1699 gp = patchmeta(fname)
1703 if create:
1700 if create:
1704 gp.op = 'ADD'
1701 gp.op = 'ADD'
1705 elif remove:
1702 elif remove:
1706 gp.op = 'DELETE'
1703 gp.op = 'DELETE'
1707 return gp
1704 return gp
1708
1705
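# Illustrative note (added): for plain (non-git) patches makepatchmeta()
# infers the operation from the hunk itself -- an afile of /dev/null with a
# 0,0 old range means ADD, a bfile of /dev/null with a 0,0 new range means
# DELETE, and everything else is treated as a modification of whichever side
# actually exists.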
1709 def scanpatch(fp):
1706 def scanpatch(fp):
1710 """like patch.iterhunks, but yield different events
1707 """like patch.iterhunks, but yield different events
1711
1708
1712 - ('file', [header_lines + fromfile + tofile])
1709 - ('file', [header_lines + fromfile + tofile])
1713 - ('context', [context_lines])
1710 - ('context', [context_lines])
1714 - ('hunk', [hunk_lines])
1711 - ('hunk', [hunk_lines])
1715 - ('range', (-start,len, +start,len, proc))
1712 - ('range', (-start,len, +start,len, proc))
1716 """
1713 """
1717 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1714 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1718 lr = linereader(fp)
1715 lr = linereader(fp)
1719
1716
1720 def scanwhile(first, p):
1717 def scanwhile(first, p):
1721 """scan lr while predicate holds"""
1718 """scan lr while predicate holds"""
1722 lines = [first]
1719 lines = [first]
1723 for line in iter(lr.readline, ''):
1720 for line in iter(lr.readline, ''):
1724 if p(line):
1721 if p(line):
1725 lines.append(line)
1722 lines.append(line)
1726 else:
1723 else:
1727 lr.push(line)
1724 lr.push(line)
1728 break
1725 break
1729 return lines
1726 return lines
1730
1727
1731 for line in iter(lr.readline, ''):
1728 for line in iter(lr.readline, ''):
1732 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1729 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1733 def notheader(line):
1730 def notheader(line):
1734 s = line.split(None, 1)
1731 s = line.split(None, 1)
1735 return not s or s[0] not in ('---', 'diff')
1732 return not s or s[0] not in ('---', 'diff')
1736 header = scanwhile(line, notheader)
1733 header = scanwhile(line, notheader)
1737 fromfile = lr.readline()
1734 fromfile = lr.readline()
1738 if fromfile.startswith('---'):
1735 if fromfile.startswith('---'):
1739 tofile = lr.readline()
1736 tofile = lr.readline()
1740 header += [fromfile, tofile]
1737 header += [fromfile, tofile]
1741 else:
1738 else:
1742 lr.push(fromfile)
1739 lr.push(fromfile)
1743 yield 'file', header
1740 yield 'file', header
1744 elif line[0] == ' ':
1741 elif line[0] == ' ':
1745 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1742 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1746 elif line[0] in '-+':
1743 elif line[0] in '-+':
1747 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1744 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1748 else:
1745 else:
1749 m = lines_re.match(line)
1746 m = lines_re.match(line)
1750 if m:
1747 if m:
1751 yield 'range', m.groups()
1748 yield 'range', m.groups()
1752 else:
1749 else:
1753 yield 'other', line
1750 yield 'other', line
1754
1751
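# Illustrative note (added): for a one-hunk git diff the event stream looks
# roughly like
#   ('file', [diff/---/+++ header lines]), ('range', ('1', '3', '1', '4', '')),
#   ('context', [' a\n']), ('hunk', ['+b\n']), ('context', [' c\n'])
# which is exactly what the parsepatch() state machine above consumes.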
1755 def scangitpatch(lr, firstline):
1752 def scangitpatch(lr, firstline):
1756 """
1753 """
1757 Git patches can emit:
1754 Git patches can emit:
1758 - rename a to b
1755 - rename a to b
1759 - change b
1756 - change b
1760 - copy a to c
1757 - copy a to c
1761 - change c
1758 - change c
1762
1759
1763 We cannot apply this sequence as-is: the renamed 'a' could not be
1760 We cannot apply this sequence as-is: the renamed 'a' could not be
1764 found, since it would have been renamed already. And we cannot copy
1761 found, since it would have been renamed already. And we cannot copy
1765 from 'b' instead because 'b' would have been changed already. So
1762 from 'b' instead because 'b' would have been changed already. So
1766 we scan the git patch for copy and rename commands so we can
1763 we scan the git patch for copy and rename commands so we can
1767 perform the copies ahead of time.
1764 perform the copies ahead of time.
1768 """
1765 """
1769 pos = 0
1766 pos = 0
1770 try:
1767 try:
1771 pos = lr.fp.tell()
1768 pos = lr.fp.tell()
1772 fp = lr.fp
1769 fp = lr.fp
1773 except IOError:
1770 except IOError:
1774 fp = stringio(lr.fp.read())
1771 fp = stringio(lr.fp.read())
1775 gitlr = linereader(fp)
1772 gitlr = linereader(fp)
1776 gitlr.push(firstline)
1773 gitlr.push(firstline)
1777 gitpatches = readgitpatch(gitlr)
1774 gitpatches = readgitpatch(gitlr)
1778 fp.seek(pos)
1775 fp.seek(pos)
1779 return gitpatches
1776 return gitpatches
1780
1777
1781 def iterhunks(fp):
1778 def iterhunks(fp):
1782 """Read a patch and yield the following events:
1779 """Read a patch and yield the following events:
1783 - ("file", afile, bfile, firsthunk): select a new target file.
1780 - ("file", afile, bfile, firsthunk): select a new target file.
1784 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1781 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1785 "file" event.
1782 "file" event.
1786 - ("git", gitchanges): current diff is in git format, gitchanges
1783 - ("git", gitchanges): current diff is in git format, gitchanges
1787 is the list of copy/rename gitpatch records. Unique event.
1784 is the list of copy/rename gitpatch records. Unique event.
1788 """
1785 """
1789 afile = ""
1786 afile = ""
1790 bfile = ""
1787 bfile = ""
1791 state = None
1788 state = None
1792 hunknum = 0
1789 hunknum = 0
1793 emitfile = newfile = False
1790 emitfile = newfile = False
1794 gitpatches = None
1791 gitpatches = None
1795
1792
1796 # our states
1793 # our states
1797 BFILE = 1
1794 BFILE = 1
1798 context = None
1795 context = None
1799 lr = linereader(fp)
1796 lr = linereader(fp)
1800
1797
1801 for x in iter(lr.readline, ''):
1798 for x in iter(lr.readline, ''):
1802 if state == BFILE and (
1799 if state == BFILE and (
1803 (not context and x[0] == '@')
1800 (not context and x[0] == '@')
1804 or (context is not False and x.startswith('***************'))
1801 or (context is not False and x.startswith('***************'))
1805 or x.startswith('GIT binary patch')):
1802 or x.startswith('GIT binary patch')):
1806 gp = None
1803 gp = None
1807 if (gitpatches and
1804 if (gitpatches and
1808 gitpatches[-1].ispatching(afile, bfile)):
1805 gitpatches[-1].ispatching(afile, bfile)):
1809 gp = gitpatches.pop()
1806 gp = gitpatches.pop()
1810 if x.startswith('GIT binary patch'):
1807 if x.startswith('GIT binary patch'):
1811 h = binhunk(lr, gp.path)
1808 h = binhunk(lr, gp.path)
1812 else:
1809 else:
1813 if context is None and x.startswith('***************'):
1810 if context is None and x.startswith('***************'):
1814 context = True
1811 context = True
1815 h = hunk(x, hunknum + 1, lr, context)
1812 h = hunk(x, hunknum + 1, lr, context)
1816 hunknum += 1
1813 hunknum += 1
1817 if emitfile:
1814 if emitfile:
1818 emitfile = False
1815 emitfile = False
1819 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1816 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1820 yield 'hunk', h
1817 yield 'hunk', h
1821 elif x.startswith('diff --git a/'):
1818 elif x.startswith('diff --git a/'):
1822 m = gitre.match(x.rstrip(' \r\n'))
1819 m = gitre.match(x.rstrip(' \r\n'))
1823 if not m:
1820 if not m:
1824 continue
1821 continue
1825 if gitpatches is None:
1822 if gitpatches is None:
1826 # scan whole input for git metadata
1823 # scan whole input for git metadata
1827 gitpatches = scangitpatch(lr, x)
1824 gitpatches = scangitpatch(lr, x)
1828 yield 'git', [g.copy() for g in gitpatches
1825 yield 'git', [g.copy() for g in gitpatches
1829 if g.op in ('COPY', 'RENAME')]
1826 if g.op in ('COPY', 'RENAME')]
1830 gitpatches.reverse()
1827 gitpatches.reverse()
1831 afile = 'a/' + m.group(1)
1828 afile = 'a/' + m.group(1)
1832 bfile = 'b/' + m.group(2)
1829 bfile = 'b/' + m.group(2)
1833 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1830 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1834 gp = gitpatches.pop()
1831 gp = gitpatches.pop()
1835 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1832 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1836 if not gitpatches:
1833 if not gitpatches:
1837 raise PatchError(_('failed to synchronize metadata for "%s"')
1834 raise PatchError(_('failed to synchronize metadata for "%s"')
1838 % afile[2:])
1835 % afile[2:])
1839 gp = gitpatches[-1]
1836 gp = gitpatches[-1]
1840 newfile = True
1837 newfile = True
1841 elif x.startswith('---'):
1838 elif x.startswith('---'):
1842 # check for a unified diff
1839 # check for a unified diff
1843 l2 = lr.readline()
1840 l2 = lr.readline()
1844 if not l2.startswith('+++'):
1841 if not l2.startswith('+++'):
1845 lr.push(l2)
1842 lr.push(l2)
1846 continue
1843 continue
1847 newfile = True
1844 newfile = True
1848 context = False
1845 context = False
1849 afile = parsefilename(x)
1846 afile = parsefilename(x)
1850 bfile = parsefilename(l2)
1847 bfile = parsefilename(l2)
1851 elif x.startswith('***'):
1848 elif x.startswith('***'):
1852 # check for a context diff
1849 # check for a context diff
1853 l2 = lr.readline()
1850 l2 = lr.readline()
1854 if not l2.startswith('---'):
1851 if not l2.startswith('---'):
1855 lr.push(l2)
1852 lr.push(l2)
1856 continue
1853 continue
1857 l3 = lr.readline()
1854 l3 = lr.readline()
1858 lr.push(l3)
1855 lr.push(l3)
1859 if not l3.startswith("***************"):
1856 if not l3.startswith("***************"):
1860 lr.push(l2)
1857 lr.push(l2)
1861 continue
1858 continue
1862 newfile = True
1859 newfile = True
1863 context = True
1860 context = True
1864 afile = parsefilename(x)
1861 afile = parsefilename(x)
1865 bfile = parsefilename(l2)
1862 bfile = parsefilename(l2)
1866
1863
1867 if newfile:
1864 if newfile:
1868 newfile = False
1865 newfile = False
1869 emitfile = True
1866 emitfile = True
1870 state = BFILE
1867 state = BFILE
1871 hunknum = 0
1868 hunknum = 0
1872
1869
1873 while gitpatches:
1870 while gitpatches:
1874 gp = gitpatches.pop()
1871 gp = gitpatches.pop()
1875 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1872 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1876
1873
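# Illustrative note (added): a consumer of iterhunks() (see _applydiff below)
# typically opens a new patchfile on each 'file' event, feeds every following
# 'hunk' event into it, and uses the single 'git' event to pre-copy rename and
# copy sources before any hunks touch them.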
1877 def applybindelta(binchunk, data):
1874 def applybindelta(binchunk, data):
1878 """Apply a binary delta hunk
1875 """Apply a binary delta hunk
1879 The algorithm mirrors the one in git's patch-delta.c.
1876 The algorithm mirrors the one in git's patch-delta.c.
1880 """
1877 """
1881 def deltahead(binchunk):
1878 def deltahead(binchunk):
1882 i = 0
1879 i = 0
1883 for c in binchunk:
1880 for c in binchunk:
1884 i += 1
1881 i += 1
1885 if not (ord(c) & 0x80):
1882 if not (ord(c) & 0x80):
1886 return i
1883 return i
1887 return i
1884 return i
1888 out = ""
1885 out = ""
1889 s = deltahead(binchunk)
1886 s = deltahead(binchunk)
1890 binchunk = binchunk[s:]
1887 binchunk = binchunk[s:]
1891 s = deltahead(binchunk)
1888 s = deltahead(binchunk)
1892 binchunk = binchunk[s:]
1889 binchunk = binchunk[s:]
1893 i = 0
1890 i = 0
1894 while i < len(binchunk):
1891 while i < len(binchunk):
1895 cmd = ord(binchunk[i])
1892 cmd = ord(binchunk[i])
1896 i += 1
1893 i += 1
1897 if (cmd & 0x80):
1894 if (cmd & 0x80):
1898 offset = 0
1895 offset = 0
1899 size = 0
1896 size = 0
1900 if (cmd & 0x01):
1897 if (cmd & 0x01):
1901 offset = ord(binchunk[i])
1898 offset = ord(binchunk[i])
1902 i += 1
1899 i += 1
1903 if (cmd & 0x02):
1900 if (cmd & 0x02):
1904 offset |= ord(binchunk[i]) << 8
1901 offset |= ord(binchunk[i]) << 8
1905 i += 1
1902 i += 1
1906 if (cmd & 0x04):
1903 if (cmd & 0x04):
1907 offset |= ord(binchunk[i]) << 16
1904 offset |= ord(binchunk[i]) << 16
1908 i += 1
1905 i += 1
1909 if (cmd & 0x08):
1906 if (cmd & 0x08):
1910 offset |= ord(binchunk[i]) << 24
1907 offset |= ord(binchunk[i]) << 24
1911 i += 1
1908 i += 1
1912 if (cmd & 0x10):
1909 if (cmd & 0x10):
1913 size = ord(binchunk[i])
1910 size = ord(binchunk[i])
1914 i += 1
1911 i += 1
1915 if (cmd & 0x20):
1912 if (cmd & 0x20):
1916 size |= ord(binchunk[i]) << 8
1913 size |= ord(binchunk[i]) << 8
1917 i += 1
1914 i += 1
1918 if (cmd & 0x40):
1915 if (cmd & 0x40):
1919 size |= ord(binchunk[i]) << 16
1916 size |= ord(binchunk[i]) << 16
1920 i += 1
1917 i += 1
1921 if size == 0:
1918 if size == 0:
1922 size = 0x10000
1919 size = 0x10000
1923 offset_end = offset + size
1920 offset_end = offset + size
1924 out += data[offset:offset_end]
1921 out += data[offset:offset_end]
1925 elif cmd != 0:
1922 elif cmd != 0:
1926 offset_end = i + cmd
1923 offset_end = i + cmd
1927 out += binchunk[i:offset_end]
1924 out += binchunk[i:offset_end]
1928 i += cmd
1925 i += cmd
1929 else:
1926 else:
1930 raise PatchError(_('unexpected delta opcode 0'))
1927 raise PatchError(_('unexpected delta opcode 0'))
1931 return out
1928 return out
1932
1929
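# Illustrative note (added): after the two size headers skipped by
# deltahead(), each opcode byte either copies from the original data (high bit
# set: the low bits say which offset/size bytes follow, and a size of 0 means
# 0x10000) or inserts the next 'cmd' literal bytes (high bit clear, cmd > 0);
# opcode 0 is rejected, mirroring git's patch-delta.c.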
1933 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1930 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1934 """Reads a patch from fp and tries to apply it.
1931 """Reads a patch from fp and tries to apply it.
1935
1932
1936 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1933 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1937 there was any fuzz.
1934 there was any fuzz.
1938
1935
1939 If 'eolmode' is 'strict', the patch content and patched file are
1936 If 'eolmode' is 'strict', the patch content and patched file are
1940 read in binary mode. Otherwise, line endings are ignored when
1937 read in binary mode. Otherwise, line endings are ignored when
1941 patching and then normalized according to 'eolmode'.
1938 patching and then normalized according to 'eolmode'.
1942 """
1939 """
1943 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1940 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1944 prefix=prefix, eolmode=eolmode)
1941 prefix=prefix, eolmode=eolmode)
1945
1942
1946 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1943 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1947 eolmode='strict'):
1944 eolmode='strict'):
1948
1945
1949 if prefix:
1946 if prefix:
1950 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1947 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1951 prefix)
1948 prefix)
1952 if prefix != '':
1949 if prefix != '':
1953 prefix += '/'
1950 prefix += '/'
1954 def pstrip(p):
1951 def pstrip(p):
1955 return pathtransform(p, strip - 1, prefix)[1]
1952 return pathtransform(p, strip - 1, prefix)[1]
1956
1953
1957 rejects = 0
1954 rejects = 0
1958 err = 0
1955 err = 0
1959 current_file = None
1956 current_file = None
1960
1957
1961 for state, values in iterhunks(fp):
1958 for state, values in iterhunks(fp):
1962 if state == 'hunk':
1959 if state == 'hunk':
1963 if not current_file:
1960 if not current_file:
1964 continue
1961 continue
1965 ret = current_file.apply(values)
1962 ret = current_file.apply(values)
1966 if ret > 0:
1963 if ret > 0:
1967 err = 1
1964 err = 1
1968 elif state == 'file':
1965 elif state == 'file':
1969 if current_file:
1966 if current_file:
1970 rejects += current_file.close()
1967 rejects += current_file.close()
1971 current_file = None
1968 current_file = None
1972 afile, bfile, first_hunk, gp = values
1969 afile, bfile, first_hunk, gp = values
1973 if gp:
1970 if gp:
1974 gp.path = pstrip(gp.path)
1971 gp.path = pstrip(gp.path)
1975 if gp.oldpath:
1972 if gp.oldpath:
1976 gp.oldpath = pstrip(gp.oldpath)
1973 gp.oldpath = pstrip(gp.oldpath)
1977 else:
1974 else:
1978 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1975 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1979 prefix)
1976 prefix)
1980 if gp.op == 'RENAME':
1977 if gp.op == 'RENAME':
1981 backend.unlink(gp.oldpath)
1978 backend.unlink(gp.oldpath)
1982 if not first_hunk:
1979 if not first_hunk:
1983 if gp.op == 'DELETE':
1980 if gp.op == 'DELETE':
1984 backend.unlink(gp.path)
1981 backend.unlink(gp.path)
1985 continue
1982 continue
1986 data, mode = None, None
1983 data, mode = None, None
1987 if gp.op in ('RENAME', 'COPY'):
1984 if gp.op in ('RENAME', 'COPY'):
1988 data, mode = store.getfile(gp.oldpath)[:2]
1985 data, mode = store.getfile(gp.oldpath)[:2]
1989 if data is None:
1986 if data is None:
1990 # This means that the old path does not exist
1987 # This means that the old path does not exist
1991 raise PatchError(_("source file '%s' does not exist")
1988 raise PatchError(_("source file '%s' does not exist")
1992 % gp.oldpath)
1989 % gp.oldpath)
1993 if gp.mode:
1990 if gp.mode:
1994 mode = gp.mode
1991 mode = gp.mode
1995 if gp.op == 'ADD':
1992 if gp.op == 'ADD':
1996 # Added files without content have no hunk and
1993 # Added files without content have no hunk and
1997 # must be created
1994 # must be created
1998 data = ''
1995 data = ''
1999 if data or mode:
1996 if data or mode:
2000 if (gp.op in ('ADD', 'RENAME', 'COPY')
1997 if (gp.op in ('ADD', 'RENAME', 'COPY')
2001 and backend.exists(gp.path)):
1998 and backend.exists(gp.path)):
2002 raise PatchError(_("cannot create %s: destination "
1999 raise PatchError(_("cannot create %s: destination "
2003 "already exists") % gp.path)
2000 "already exists") % gp.path)
2004 backend.setfile(gp.path, data, mode, gp.oldpath)
2001 backend.setfile(gp.path, data, mode, gp.oldpath)
2005 continue
2002 continue
2006 try:
2003 try:
2007 current_file = patcher(ui, gp, backend, store,
2004 current_file = patcher(ui, gp, backend, store,
2008 eolmode=eolmode)
2005 eolmode=eolmode)
2009 except PatchError as inst:
2006 except PatchError as inst:
2010 ui.warn(str(inst) + '\n')
2007 ui.warn(str(inst) + '\n')
2011 current_file = None
2008 current_file = None
2012 rejects += 1
2009 rejects += 1
2013 continue
2010 continue
2014 elif state == 'git':
2011 elif state == 'git':
2015 for gp in values:
2012 for gp in values:
2016 path = pstrip(gp.oldpath)
2013 path = pstrip(gp.oldpath)
2017 data, mode = backend.getfile(path)
2014 data, mode = backend.getfile(path)
2018 if data is None:
2015 if data is None:
2019 # The error ignored here will trigger a getfile()
2016 # The error ignored here will trigger a getfile()
2020 # error in a place more appropriate for error
2017 # error in a place more appropriate for error
2021 # handling, and will not interrupt the patching
2018 # handling, and will not interrupt the patching
2022 # process.
2019 # process.
2023 pass
2020 pass
2024 else:
2021 else:
2025 store.setfile(path, data, mode)
2022 store.setfile(path, data, mode)
2026 else:
2023 else:
2027 raise error.Abort(_('unsupported parser state: %s') % state)
2024 raise error.Abort(_('unsupported parser state: %s') % state)
2028
2025
2029 if current_file:
2026 if current_file:
2030 rejects += current_file.close()
2027 rejects += current_file.close()
2031
2028
2032 if rejects:
2029 if rejects:
2033 return -1
2030 return -1
2034 return err
2031 return err
2035
2032
2036 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2033 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2037 similarity):
2034 similarity):
2038 """use <patcher> to apply <patchname> to the working directory.
2035 """use <patcher> to apply <patchname> to the working directory.
2039 returns whether patch was applied with fuzz factor."""
2036 returns whether patch was applied with fuzz factor."""
2040
2037
2041 fuzz = False
2038 fuzz = False
2042 args = []
2039 args = []
2043 cwd = repo.root
2040 cwd = repo.root
2044 if cwd:
2041 if cwd:
2045 args.append('-d %s' % util.shellquote(cwd))
2042 args.append('-d %s' % util.shellquote(cwd))
2046 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2043 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2047 util.shellquote(patchname)))
2044 util.shellquote(patchname)))
2048 try:
2045 try:
2049 for line in util.iterfile(fp):
2046 for line in util.iterfile(fp):
2050 line = line.rstrip()
2047 line = line.rstrip()
2051 ui.note(line + '\n')
2048 ui.note(line + '\n')
2052 if line.startswith('patching file '):
2049 if line.startswith('patching file '):
2053 pf = util.parsepatchoutput(line)
2050 pf = util.parsepatchoutput(line)
2054 printed_file = False
2051 printed_file = False
2055 files.add(pf)
2052 files.add(pf)
2056 elif line.find('with fuzz') >= 0:
2053 elif line.find('with fuzz') >= 0:
2057 fuzz = True
2054 fuzz = True
2058 if not printed_file:
2055 if not printed_file:
2059 ui.warn(pf + '\n')
2056 ui.warn(pf + '\n')
2060 printed_file = True
2057 printed_file = True
2061 ui.warn(line + '\n')
2058 ui.warn(line + '\n')
2062 elif line.find('saving rejects to file') >= 0:
2059 elif line.find('saving rejects to file') >= 0:
2063 ui.warn(line + '\n')
2060 ui.warn(line + '\n')
2064 elif line.find('FAILED') >= 0:
2061 elif line.find('FAILED') >= 0:
2065 if not printed_file:
2062 if not printed_file:
2066 ui.warn(pf + '\n')
2063 ui.warn(pf + '\n')
2067 printed_file = True
2064 printed_file = True
2068 ui.warn(line + '\n')
2065 ui.warn(line + '\n')
2069 finally:
2066 finally:
2070 if files:
2067 if files:
2071 scmutil.marktouched(repo, files, similarity)
2068 scmutil.marktouched(repo, files, similarity)
2072 code = fp.close()
2069 code = fp.close()
2073 if code:
2070 if code:
2074 raise PatchError(_("patch command failed: %s") %
2071 raise PatchError(_("patch command failed: %s") %
2075 util.explainexit(code)[0])
2072 util.explainexit(code)[0])
2076 return fuzz
2073 return fuzz
2077
2074
2078 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2075 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2079 eolmode='strict'):
2076 eolmode='strict'):
2080 if files is None:
2077 if files is None:
2081 files = set()
2078 files = set()
2082 if eolmode is None:
2079 if eolmode is None:
2083 eolmode = ui.config('patch', 'eol', 'strict')
2080 eolmode = ui.config('patch', 'eol', 'strict')
2084 if eolmode.lower() not in eolmodes:
2081 if eolmode.lower() not in eolmodes:
2085 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2082 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2086 eolmode = eolmode.lower()
2083 eolmode = eolmode.lower()
2087
2084
2088 store = filestore()
2085 store = filestore()
2089 try:
2086 try:
2090 fp = open(patchobj, 'rb')
2087 fp = open(patchobj, 'rb')
2091 except TypeError:
2088 except TypeError:
2092 fp = patchobj
2089 fp = patchobj
2093 try:
2090 try:
2094 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2091 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2095 eolmode=eolmode)
2092 eolmode=eolmode)
2096 finally:
2093 finally:
2097 if fp != patchobj:
2094 if fp != patchobj:
2098 fp.close()
2095 fp.close()
2099 files.update(backend.close())
2096 files.update(backend.close())
2100 store.close()
2097 store.close()
2101 if ret < 0:
2098 if ret < 0:
2102 raise PatchError(_('patch failed to apply'))
2099 raise PatchError(_('patch failed to apply'))
2103 return ret > 0
2100 return ret > 0
2104
2101
2105 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2102 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2106 eolmode='strict', similarity=0):
2103 eolmode='strict', similarity=0):
2107 """use builtin patch to apply <patchobj> to the working directory.
2104 """use builtin patch to apply <patchobj> to the working directory.
2108 returns whether patch was applied with fuzz factor."""
2105 returns whether patch was applied with fuzz factor."""
2109 backend = workingbackend(ui, repo, similarity)
2106 backend = workingbackend(ui, repo, similarity)
2110 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2107 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2111
2108
2112 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2109 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2113 eolmode='strict'):
2110 eolmode='strict'):
2114 backend = repobackend(ui, repo, ctx, store)
2111 backend = repobackend(ui, repo, ctx, store)
2115 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2112 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2116
2113
2117 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2114 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2118 similarity=0):
2115 similarity=0):
2119 """Apply <patchname> to the working directory.
2116 """Apply <patchname> to the working directory.
2120
2117
2121 'eolmode' specifies how end of lines should be handled. It can be:
2118 'eolmode' specifies how end of lines should be handled. It can be:
2122 - 'strict': inputs are read in binary mode, EOLs are preserved
2119 - 'strict': inputs are read in binary mode, EOLs are preserved
2123 - 'crlf': EOLs are ignored when patching and reset to CRLF
2120 - 'crlf': EOLs are ignored when patching and reset to CRLF
2124 - 'lf': EOLs are ignored when patching and reset to LF
2121 - 'lf': EOLs are ignored when patching and reset to LF
2125 - None: get it from user settings, default to 'strict'
2122 - None: get it from user settings, default to 'strict'
2126 'eolmode' is ignored when using an external patcher program.
2123 'eolmode' is ignored when using an external patcher program.
2127
2124
2128 Returns whether patch was applied with fuzz factor.
2125 Returns whether patch was applied with fuzz factor.
2129 """
2126 """
2130 patcher = ui.config('ui', 'patch')
2127 patcher = ui.config('ui', 'patch')
2131 if files is None:
2128 if files is None:
2132 files = set()
2129 files = set()
2133 if patcher:
2130 if patcher:
2134 return _externalpatch(ui, repo, patcher, patchname, strip,
2131 return _externalpatch(ui, repo, patcher, patchname, strip,
2135 files, similarity)
2132 files, similarity)
2136 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2133 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2137 similarity)
2134 similarity)
2138
2135
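# Editor's sketch (not part of the original module): a minimal way an
# extension or script might drive patch() above, assuming a 'fix.patch'
# file in the working directory; the error handling and messages are
# illustrative only.
def _example_apply_patch(ui, repo):
    files = set()
    try:
        # eolmode=None defers to the patch.eol configuration
        fuzz = patch(ui, repo, 'fix.patch', strip=1, files=files,
                     eolmode=None)
    except PatchError as inst:
        ui.warn('patch failed: %s\n' % inst)
        return 1
    if fuzz:
        ui.status('patch applied with fuzz\n')
    ui.status('patch touched %d files\n' % len(files))
    return 0
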
2139 def changedfiles(ui, repo, patchpath, strip=1):
2136 def changedfiles(ui, repo, patchpath, strip=1):
2140 backend = fsbackend(ui, repo.root)
2137 backend = fsbackend(ui, repo.root)
2141 with open(patchpath, 'rb') as fp:
2138 with open(patchpath, 'rb') as fp:
2142 changed = set()
2139 changed = set()
2143 for state, values in iterhunks(fp):
2140 for state, values in iterhunks(fp):
2144 if state == 'file':
2141 if state == 'file':
2145 afile, bfile, first_hunk, gp = values
2142 afile, bfile, first_hunk, gp = values
2146 if gp:
2143 if gp:
2147 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2144 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2148 if gp.oldpath:
2145 if gp.oldpath:
2149 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2146 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2150 else:
2147 else:
2151 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2148 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2152 '')
2149 '')
2153 changed.add(gp.path)
2150 changed.add(gp.path)
2154 if gp.op == 'RENAME':
2151 if gp.op == 'RENAME':
2155 changed.add(gp.oldpath)
2152 changed.add(gp.oldpath)
2156 elif state not in ('hunk', 'git'):
2153 elif state not in ('hunk', 'git'):
2157 raise error.Abort(_('unsupported parser state: %s') % state)
2154 raise error.Abort(_('unsupported parser state: %s') % state)
2158 return changed
2155 return changed
2159
2156
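# Editor's sketch (not part of the original module): changedfiles() only
# parses the patch, so it can be used to preview the affected paths before
# anything is applied; the patch filename is an assumption.
def _example_preview_patch(ui, repo):
    for f in sorted(changedfiles(ui, repo, 'fix.patch', strip=1)):
        ui.write('%s\n' % f)
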
2160 class GitDiffRequired(Exception):
2157 class GitDiffRequired(Exception):
2161 pass
2158 pass
2162
2159
2163 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2160 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2164 '''return diffopts with all features supported and parsed'''
2161 '''return diffopts with all features supported and parsed'''
2165 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2162 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2166 git=True, whitespace=True, formatchanging=True)
2163 git=True, whitespace=True, formatchanging=True)
2167
2164
2168 diffopts = diffallopts
2165 diffopts = diffallopts
2169
2166
2170 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2167 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2171 whitespace=False, formatchanging=False):
2168 whitespace=False, formatchanging=False):
2172 '''return diffopts with only opted-in features parsed
2169 '''return diffopts with only opted-in features parsed
2173
2170
2174 Features:
2171 Features:
2175 - git: git-style diffs
2172 - git: git-style diffs
2176 - whitespace: whitespace options like ignoreblanklines and ignorews
2173 - whitespace: whitespace options like ignoreblanklines and ignorews
2177 - formatchanging: options that will likely break or cause correctness issues
2174 - formatchanging: options that will likely break or cause correctness issues
2178 with most diff parsers
2175 with most diff parsers
2179 '''
2176 '''
2180 def get(key, name=None, getter=ui.configbool, forceplain=None):
2177 def get(key, name=None, getter=ui.configbool, forceplain=None):
2181 if opts:
2178 if opts:
2182 v = opts.get(key)
2179 v = opts.get(key)
2183 # diffopts flags are either None-default (which is passed
2180 # diffopts flags are either None-default (which is passed
2184 # through unchanged, so we can identify unset values), or
2181 # through unchanged, so we can identify unset values), or
2185 # some other falsy default (e.g. --unified, which defaults
2182 # some other falsy default (e.g. --unified, which defaults
2186 # to an empty string). We only want to override the config
2183 # to an empty string). We only want to override the config
2187 # entries from hgrc with command line values if they
2184 # entries from hgrc with command line values if they
2188 # appear to have been set, which is any truthy value,
2185 # appear to have been set, which is any truthy value,
2189 # True, or False.
2186 # True, or False.
2190 if v or isinstance(v, bool):
2187 if v or isinstance(v, bool):
2191 return v
2188 return v
2192 if forceplain is not None and ui.plain():
2189 if forceplain is not None and ui.plain():
2193 return forceplain
2190 return forceplain
2194 return getter(section, name or key, None, untrusted=untrusted)
2191 return getter(section, name or key, None, untrusted=untrusted)
2195
2192
2196 # core options, expected to be understood by every diff parser
2193 # core options, expected to be understood by every diff parser
2197 buildopts = {
2194 buildopts = {
2198 'nodates': get('nodates'),
2195 'nodates': get('nodates'),
2199 'showfunc': get('show_function', 'showfunc'),
2196 'showfunc': get('show_function', 'showfunc'),
2200 'context': get('unified', getter=ui.config),
2197 'context': get('unified', getter=ui.config),
2201 }
2198 }
2202
2199
2203 if git:
2200 if git:
2204 buildopts['git'] = get('git')
2201 buildopts['git'] = get('git')
2205
2202
2206 # since this is in the experimental section, we need to call
2203 # since this is in the experimental section, we need to call
2207 # ui.configbool directly
2204 # ui.configbool directly
2208 buildopts['showsimilarity'] = ui.configbool('experimental',
2205 buildopts['showsimilarity'] = ui.configbool('experimental',
2209 'extendedheader.similarity')
2206 'extendedheader.similarity')
2210
2207
2211 # need to inspect the ui object instead of using get() since we want to
2208 # need to inspect the ui object instead of using get() since we want to
2212 # test for an int
2209 # test for an int
2213 hconf = ui.config('experimental', 'extendedheader.index')
2210 hconf = ui.config('experimental', 'extendedheader.index')
2214 if hconf is not None:
2211 if hconf is not None:
2215 hlen = None
2212 hlen = None
2216 try:
2213 try:
2217 # the hash config could be an integer (for length of hash) or a
2214 # the hash config could be an integer (for length of hash) or a
2218 # word (e.g. short, full, none)
2215 # word (e.g. short, full, none)
2219 hlen = int(hconf)
2216 hlen = int(hconf)
2220 if hlen < 0 or hlen > 40:
2217 if hlen < 0 or hlen > 40:
2221 msg = _("invalid length for extendedheader.index: '%d'\n")
2218 msg = _("invalid length for extendedheader.index: '%d'\n")
2222 ui.warn(msg % hlen)
2219 ui.warn(msg % hlen)
2223 except ValueError:
2220 except ValueError:
2224 # default value
2221 # default value
2225 if hconf == 'short' or hconf == '':
2222 if hconf == 'short' or hconf == '':
2226 hlen = 12
2223 hlen = 12
2227 elif hconf == 'full':
2224 elif hconf == 'full':
2228 hlen = 40
2225 hlen = 40
2229 elif hconf != 'none':
2226 elif hconf != 'none':
2230 msg = _("invalid value for extendedheader.index: '%s'\n")
2227 msg = _("invalid value for extendedheader.index: '%s'\n")
2231 ui.warn(msg % hconf)
2228 ui.warn(msg % hconf)
2232 finally:
2229 finally:
2233 buildopts['index'] = hlen
2230 buildopts['index'] = hlen
2234
2231
2235 if whitespace:
2232 if whitespace:
2236 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2233 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2237 buildopts['ignorewsamount'] = get('ignore_space_change',
2234 buildopts['ignorewsamount'] = get('ignore_space_change',
2238 'ignorewsamount')
2235 'ignorewsamount')
2239 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2236 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2240 'ignoreblanklines')
2237 'ignoreblanklines')
2241 if formatchanging:
2238 if formatchanging:
2242 buildopts['text'] = opts and opts.get('text')
2239 buildopts['text'] = opts and opts.get('text')
2243 binary = None if opts is None else opts.get('binary')
2240 binary = None if opts is None else opts.get('binary')
2244 buildopts['nobinary'] = (not binary if binary is not None
2241 buildopts['nobinary'] = (not binary if binary is not None
2245 else get('nobinary', forceplain=False))
2242 else get('nobinary', forceplain=False))
2246 buildopts['noprefix'] = get('noprefix', forceplain=False)
2243 buildopts['noprefix'] = get('noprefix', forceplain=False)
2247
2244
2248 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2245 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2249
2246
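# Editor's sketch (not part of the original module): it contrasts
# diffallopts(), which parses every feature, with difffeatureopts(), which
# only parses the opted-in ones; the opts dict mimics command-line values
# and is an assumption.
def _example_build_diffopts(ui):
    # user-facing output: honor all diff settings, force git-style diffs
    full = diffallopts(ui, opts={'git': True, 'unified': '5'})
    # machine-facing output: core and git options only, so whitespace and
    # format-changing settings cannot alter the parsed result
    narrow = difffeatureopts(ui, git=True)
    return full, narrow
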
2250 def diff(repo, node1=None, node2=None, match=None, changes=None,
2247 def diff(repo, node1=None, node2=None, match=None, changes=None,
2251 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2248 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2252 '''yields diff of changes to files between two nodes, or node and
2249 '''yields diff of changes to files between two nodes, or node and
2253 working directory.
2250 working directory.
2254
2251
2255 if node1 is None, use first dirstate parent instead.
2252 if node1 is None, use first dirstate parent instead.
2256 if node2 is None, compare node1 with working directory.
2253 if node2 is None, compare node1 with working directory.
2257
2254
2258 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2255 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2259 every time some change cannot be represented with the current
2256 every time some change cannot be represented with the current
2260 patch format. Return False to upgrade to git patch format, True to
2257 patch format. Return False to upgrade to git patch format, True to
2261 accept the loss or raise an exception to abort the diff. It is
2258 accept the loss or raise an exception to abort the diff. It is
2262 called with the name of the current file being diffed as 'fn'. If set
2259 called with the name of the current file being diffed as 'fn'. If set
2263 to None, patches will always be upgraded to git format when
2260 to None, patches will always be upgraded to git format when
2264 necessary.
2261 necessary.
2265
2262
2266 prefix is a filename prefix that is prepended to all filenames on
2263 prefix is a filename prefix that is prepended to all filenames on
2267 display (used for subrepos).
2264 display (used for subrepos).
2268
2265
2269 relroot, if not empty, must be normalized with a trailing /. Any match
2266 relroot, if not empty, must be normalized with a trailing /. Any match
2270 patterns that fall outside it will be ignored.
2267 patterns that fall outside it will be ignored.
2271
2268
2272 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2269 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2273 information.'''
2270 information.'''
2274 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2271 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2275 changes=changes, opts=opts,
2272 changes=changes, opts=opts,
2276 losedatafn=losedatafn, prefix=prefix,
2273 losedatafn=losedatafn, prefix=prefix,
2277 relroot=relroot, copy=copy):
2274 relroot=relroot, copy=copy):
2278 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2275 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2279 if header and (text or len(header) > 1):
2276 if header and (text or len(header) > 1):
2280 yield '\n'.join(header) + '\n'
2277 yield '\n'.join(header) + '\n'
2281 if text:
2278 if text:
2282 yield text
2279 yield text
2283
2280
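# Editor's sketch (not part of the original module): diff() yields plain
# text chunks, so emitting a git-style diff of the working directory
# against its first parent only needs a repo and a diffopts object; using
# repo.ui for the options is an assumption about the caller.
def _example_wdir_diff(ui, repo):
    opts = diffallopts(repo.ui, opts={'git': True})
    for chunk in diff(repo, opts=opts):
        ui.write(chunk)
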
2284 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2281 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2285 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2282 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2286 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2283 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2287 where `header` is a list of diff headers and `hunks` is an iterable of
2284 where `header` is a list of diff headers and `hunks` is an iterable of
2288 (`hunkrange`, `hunklines`) tuples.
2285 (`hunkrange`, `hunklines`) tuples.
2289
2286
2290 See diff() for the meaning of parameters.
2287 See diff() for the meaning of parameters.
2291 """
2288 """
2292
2289
2293 if opts is None:
2290 if opts is None:
2294 opts = mdiff.defaultopts
2291 opts = mdiff.defaultopts
2295
2292
2296 if not node1 and not node2:
2293 if not node1 and not node2:
2297 node1 = repo.dirstate.p1()
2294 node1 = repo.dirstate.p1()
2298
2295
2299 def lrugetfilectx():
2296 def lrugetfilectx():
2300 cache = {}
2297 cache = {}
2301 order = collections.deque()
2298 order = collections.deque()
2302 def getfilectx(f, ctx):
2299 def getfilectx(f, ctx):
2303 fctx = ctx.filectx(f, filelog=cache.get(f))
2300 fctx = ctx.filectx(f, filelog=cache.get(f))
2304 if f not in cache:
2301 if f not in cache:
2305 if len(cache) > 20:
2302 if len(cache) > 20:
2306 del cache[order.popleft()]
2303 del cache[order.popleft()]
2307 cache[f] = fctx.filelog()
2304 cache[f] = fctx.filelog()
2308 else:
2305 else:
2309 order.remove(f)
2306 order.remove(f)
2310 order.append(f)
2307 order.append(f)
2311 return fctx
2308 return fctx
2312 return getfilectx
2309 return getfilectx
2313 getfilectx = lrugetfilectx()
2310 getfilectx = lrugetfilectx()
2314
2311
2315 ctx1 = repo[node1]
2312 ctx1 = repo[node1]
2316 ctx2 = repo[node2]
2313 ctx2 = repo[node2]
2317
2314
2318 relfiltered = False
2315 relfiltered = False
2319 if relroot != '' and match.always():
2316 if relroot != '' and match.always():
2320 # as a special case, create a new matcher with just the relroot
2317 # as a special case, create a new matcher with just the relroot
2321 pats = [relroot]
2318 pats = [relroot]
2322 match = scmutil.match(ctx2, pats, default='path')
2319 match = scmutil.match(ctx2, pats, default='path')
2323 relfiltered = True
2320 relfiltered = True
2324
2321
2325 if not changes:
2322 if not changes:
2326 changes = repo.status(ctx1, ctx2, match=match)
2323 changes = repo.status(ctx1, ctx2, match=match)
2327 modified, added, removed = changes[:3]
2324 modified, added, removed = changes[:3]
2328
2325
2329 if not modified and not added and not removed:
2326 if not modified and not added and not removed:
2330 return []
2327 return []
2331
2328
2332 if repo.ui.debugflag:
2329 if repo.ui.debugflag:
2333 hexfunc = hex
2330 hexfunc = hex
2334 else:
2331 else:
2335 hexfunc = short
2332 hexfunc = short
2336 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2333 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2337
2334
2338 if copy is None:
2335 if copy is None:
2339 copy = {}
2336 copy = {}
2340 if opts.git or opts.upgrade:
2337 if opts.git or opts.upgrade:
2341 copy = copies.pathcopies(ctx1, ctx2, match=match)
2338 copy = copies.pathcopies(ctx1, ctx2, match=match)
2342
2339
2343 if relroot is not None:
2340 if relroot is not None:
2344 if not relfiltered:
2341 if not relfiltered:
2345 # XXX this would ideally be done in the matcher, but that is
2342 # XXX this would ideally be done in the matcher, but that is
2346 # generally meant to 'or' patterns, not 'and' them. In this case we
2343 # generally meant to 'or' patterns, not 'and' them. In this case we
2347 # need to 'and' all the patterns from the matcher with relroot.
2344 # need to 'and' all the patterns from the matcher with relroot.
2348 def filterrel(l):
2345 def filterrel(l):
2349 return [f for f in l if f.startswith(relroot)]
2346 return [f for f in l if f.startswith(relroot)]
2350 modified = filterrel(modified)
2347 modified = filterrel(modified)
2351 added = filterrel(added)
2348 added = filterrel(added)
2352 removed = filterrel(removed)
2349 removed = filterrel(removed)
2353 relfiltered = True
2350 relfiltered = True
2354 # filter out copies where either side isn't inside the relative root
2351 # filter out copies where either side isn't inside the relative root
2355 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2352 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2356 if dst.startswith(relroot)
2353 if dst.startswith(relroot)
2357 and src.startswith(relroot)))
2354 and src.startswith(relroot)))
2358
2355
2359 modifiedset = set(modified)
2356 modifiedset = set(modified)
2360 addedset = set(added)
2357 addedset = set(added)
2361 removedset = set(removed)
2358 removedset = set(removed)
2362 for f in modified:
2359 for f in modified:
2363 if f not in ctx1:
2360 if f not in ctx1:
2364 # Fix up added, since merged-in additions appear as
2361 # Fix up added, since merged-in additions appear as
2365 # modifications during merges
2362 # modifications during merges
2366 modifiedset.remove(f)
2363 modifiedset.remove(f)
2367 addedset.add(f)
2364 addedset.add(f)
2368 for f in removed:
2365 for f in removed:
2369 if f not in ctx1:
2366 if f not in ctx1:
2370 # Merged-in additions that are then removed are reported as removed.
2367 # Merged-in additions that are then removed are reported as removed.
2371 # They are not in ctx1, so we don't want to show them in the diff.
2368 # They are not in ctx1, so we don't want to show them in the diff.
2372 removedset.remove(f)
2369 removedset.remove(f)
2373 modified = sorted(modifiedset)
2370 modified = sorted(modifiedset)
2374 added = sorted(addedset)
2371 added = sorted(addedset)
2375 removed = sorted(removedset)
2372 removed = sorted(removedset)
2376 for dst, src in copy.items():
2373 for dst, src in copy.items():
2377 if src not in ctx1:
2374 if src not in ctx1:
2378 # Files merged in during a merge and then copied/renamed are
2375 # Files merged in during a merge and then copied/renamed are
2379 # reported as copies. We want to show them in the diff as additions.
2376 # reported as copies. We want to show them in the diff as additions.
2380 del copy[dst]
2377 del copy[dst]
2381
2378
2382 def difffn(opts, losedata):
2379 def difffn(opts, losedata):
2383 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2380 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2384 copy, getfilectx, opts, losedata, prefix, relroot)
2381 copy, getfilectx, opts, losedata, prefix, relroot)
2385 if opts.upgrade and not opts.git:
2382 if opts.upgrade and not opts.git:
2386 try:
2383 try:
2387 def losedata(fn):
2384 def losedata(fn):
2388 if not losedatafn or not losedatafn(fn=fn):
2385 if not losedatafn or not losedatafn(fn=fn):
2389 raise GitDiffRequired
2386 raise GitDiffRequired
2390 # Buffer the whole output until we are sure it can be generated
2387 # Buffer the whole output until we are sure it can be generated
2391 return list(difffn(opts.copy(git=False), losedata))
2388 return list(difffn(opts.copy(git=False), losedata))
2392 except GitDiffRequired:
2389 except GitDiffRequired:
2393 return difffn(opts.copy(git=True), None)
2390 return difffn(opts.copy(git=True), None)
2394 else:
2391 else:
2395 return difffn(opts, None)
2392 return difffn(opts, None)
2396
2393
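# Editor's sketch (not part of the original module): unlike diff(),
# diffhunks() preserves structure - each item is (header, hunks) with
# hunks yielding (hunkrange, hunklines) - which makes it easy to count or
# filter hunks before rendering; the node arguments are assumptions.
def _example_count_hunks(repo, node1, node2):
    counts = {}
    for header, hunks in diffhunks(repo, node1=node1, node2=node2,
                                   opts=diffallopts(repo.ui)):
        # the first header line names the file ('diff ...' / 'diff --git ...')
        name = header[0] if header else '(no header)'
        counts[name] = len(list(hunks))
    return counts
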
2397 def difflabel(func, *args, **kw):
2394 def difflabel(func, *args, **kw):
2398 '''yields 2-tuples of (output, label) based on the output of func()'''
2395 '''yields 2-tuples of (output, label) based on the output of func()'''
2399 headprefixes = [('diff', 'diff.diffline'),
2396 headprefixes = [('diff', 'diff.diffline'),
2400 ('copy', 'diff.extended'),
2397 ('copy', 'diff.extended'),
2401 ('rename', 'diff.extended'),
2398 ('rename', 'diff.extended'),
2402 ('old', 'diff.extended'),
2399 ('old', 'diff.extended'),
2403 ('new', 'diff.extended'),
2400 ('new', 'diff.extended'),
2404 ('deleted', 'diff.extended'),
2401 ('deleted', 'diff.extended'),
2405 ('index', 'diff.extended'),
2402 ('index', 'diff.extended'),
2406 ('similarity', 'diff.extended'),
2403 ('similarity', 'diff.extended'),
2407 ('---', 'diff.file_a'),
2404 ('---', 'diff.file_a'),
2408 ('+++', 'diff.file_b')]
2405 ('+++', 'diff.file_b')]
2409 textprefixes = [('@', 'diff.hunk'),
2406 textprefixes = [('@', 'diff.hunk'),
2410 ('-', 'diff.deleted'),
2407 ('-', 'diff.deleted'),
2411 ('+', 'diff.inserted')]
2408 ('+', 'diff.inserted')]
2412 head = False
2409 head = False
2413 for chunk in func(*args, **kw):
2410 for chunk in func(*args, **kw):
2414 lines = chunk.split('\n')
2411 lines = chunk.split('\n')
2415 for i, line in enumerate(lines):
2412 for i, line in enumerate(lines):
2416 if i != 0:
2413 if i != 0:
2417 yield ('\n', '')
2414 yield ('\n', '')
2418 if head:
2415 if head:
2419 if line.startswith('@'):
2416 if line.startswith('@'):
2420 head = False
2417 head = False
2421 else:
2418 else:
2422 if line and line[0] not in ' +-@\\':
2419 if line and line[0] not in ' +-@\\':
2423 head = True
2420 head = True
2424 stripline = line
2421 stripline = line
2425 diffline = False
2422 diffline = False
2426 if not head and line and line[0] in '+-':
2423 if not head and line and line[0] in '+-':
2427 # highlight tabs and trailing whitespace, but only in
2424 # highlight tabs and trailing whitespace, but only in
2428 # changed lines
2425 # changed lines
2429 stripline = line.rstrip()
2426 stripline = line.rstrip()
2430 diffline = True
2427 diffline = True
2431
2428
2432 prefixes = textprefixes
2429 prefixes = textprefixes
2433 if head:
2430 if head:
2434 prefixes = headprefixes
2431 prefixes = headprefixes
2435 for prefix, label in prefixes:
2432 for prefix, label in prefixes:
2436 if stripline.startswith(prefix):
2433 if stripline.startswith(prefix):
2437 if diffline:
2434 if diffline:
2438 for token in tabsplitter.findall(stripline):
2435 for token in tabsplitter.findall(stripline):
2439 if '\t' == token[0]:
2436 if '\t' == token[0]:
2440 yield (token, 'diff.tab')
2437 yield (token, 'diff.tab')
2441 else:
2438 else:
2442 yield (token, label)
2439 yield (token, label)
2443 else:
2440 else:
2444 yield (stripline, label)
2441 yield (stripline, label)
2445 break
2442 break
2446 else:
2443 else:
2447 yield (line, '')
2444 yield (line, '')
2448 if line != stripline:
2445 if line != stripline:
2449 yield (line[len(stripline):], 'diff.trailingwhitespace')
2446 yield (line[len(stripline):], 'diff.trailingwhitespace')
2450
2447
2451 def diffui(*args, **kw):
2448 def diffui(*args, **kw):
2452 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2449 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2453 return difflabel(diff, *args, **kw)
2450 return difflabel(diff, *args, **kw)
2454
2451
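# Editor's sketch (not part of the original module): diffui() routes diff()
# through difflabel(), so each chunk comes back with the label the ui layer
# maps to color effects; the docstring above says the pairs are meant for
# ui.write(), and that is all this helper does.
def _example_colored_diff(ui, repo):
    for chunk, label in diffui(repo, opts=diffallopts(repo.ui)):
        ui.write(chunk, label=label)
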
2455 def _filepairs(modified, added, removed, copy, opts):
2452 def _filepairs(modified, added, removed, copy, opts):
2456 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2457 before and f2 is the name after. For added files, f1 will be None,
2454 before and f2 is the name after. For added files, f1 will be None,
2458 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2459 or 'rename' (the latter two only if opts.git is set).'''
2456 or 'rename' (the latter two only if opts.git is set).'''
2460 gone = set()
2457 gone = set()
2461
2458
2462 copyto = dict([(v, k) for k, v in copy.items()])
2459 copyto = dict([(v, k) for k, v in copy.items()])
2463
2460
2464 addedset, removedset = set(added), set(removed)
2461 addedset, removedset = set(added), set(removed)
2465
2462
2466 for f in sorted(modified + added + removed):
2463 for f in sorted(modified + added + removed):
2467 copyop = None
2464 copyop = None
2468 f1, f2 = f, f
2465 f1, f2 = f, f
2469 if f in addedset:
2466 if f in addedset:
2470 f1 = None
2467 f1 = None
2471 if f in copy:
2468 if f in copy:
2472 if opts.git:
2469 if opts.git:
2473 f1 = copy[f]
2470 f1 = copy[f]
2474 if f1 in removedset and f1 not in gone:
2471 if f1 in removedset and f1 not in gone:
2475 copyop = 'rename'
2472 copyop = 'rename'
2476 gone.add(f1)
2473 gone.add(f1)
2477 else:
2474 else:
2478 copyop = 'copy'
2475 copyop = 'copy'
2479 elif f in removedset:
2476 elif f in removedset:
2480 f2 = None
2477 f2 = None
2481 if opts.git:
2478 if opts.git:
2482 # have we already reported a copy above?
2479 # have we already reported a copy above?
2483 if (f in copyto and copyto[f] in addedset
2480 if (f in copyto and copyto[f] in addedset
2484 and copy[copyto[f]] == f):
2481 and copy[copyto[f]] == f):
2485 continue
2482 continue
2486 yield f1, f2, copyop
2483 yield f1, f2, copyop
2487
2484
2488 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2485 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2489 copy, getfilectx, opts, losedatafn, prefix, relroot):
2486 copy, getfilectx, opts, losedatafn, prefix, relroot):
2490 '''given input data, generate a diff and yield it in blocks
2487 '''given input data, generate a diff and yield it in blocks
2491
2488
2492 If generating a diff would lose data like flags or binary data and
2489 If generating a diff would lose data like flags or binary data and
2493 losedatafn is not None, it will be called.
2490 losedatafn is not None, it will be called.
2494
2491
2495 relroot is removed and prefix is added to every path in the diff output.
2492 relroot is removed and prefix is added to every path in the diff output.
2496
2493
2497 If relroot is not empty, this function expects every path in modified,
2494 If relroot is not empty, this function expects every path in modified,
2498 added, removed and copy to start with it.'''
2495 added, removed and copy to start with it.'''
2499
2496
2500 def gitindex(text):
2497 def gitindex(text):
2501 if not text:
2498 if not text:
2502 text = ""
2499 text = ""
2503 l = len(text)
2500 l = len(text)
2504 s = hashlib.sha1('blob %d\0' % l)
2501 s = hashlib.sha1('blob %d\0' % l)
2505 s.update(text)
2502 s.update(text)
2506 return s.hexdigest()
2503 return s.hexdigest()
2507
2504
2508 if opts.noprefix:
2505 if opts.noprefix:
2509 aprefix = bprefix = ''
2506 aprefix = bprefix = ''
2510 else:
2507 else:
2511 aprefix = 'a/'
2508 aprefix = 'a/'
2512 bprefix = 'b/'
2509 bprefix = 'b/'
2513
2510
2514 def diffline(f, revs):
2511 def diffline(f, revs):
2515 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2512 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2516 return 'diff %s %s' % (revinfo, f)
2513 return 'diff %s %s' % (revinfo, f)
2517
2514
2518 def isempty(fctx):
2515 def isempty(fctx):
2519 return fctx is None or fctx.size() == 0
2516 return fctx is None or fctx.size() == 0
2520
2517
2521 date1 = util.datestr(ctx1.date())
2518 date1 = util.datestr(ctx1.date())
2522 date2 = util.datestr(ctx2.date())
2519 date2 = util.datestr(ctx2.date())
2523
2520
2524 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2525
2522
2526 if relroot != '' and (repo.ui.configbool('devel', 'all')
2523 if relroot != '' and (repo.ui.configbool('devel', 'all')
2527 or repo.ui.configbool('devel', 'check-relroot')):
2524 or repo.ui.configbool('devel', 'check-relroot')):
2528 for f in modified + added + removed + copy.keys() + copy.values():
2525 for f in modified + added + removed + copy.keys() + copy.values():
2529 if f is not None and not f.startswith(relroot):
2526 if f is not None and not f.startswith(relroot):
2530 raise AssertionError(
2527 raise AssertionError(
2531 "file %s doesn't start with relroot %s" % (f, relroot))
2528 "file %s doesn't start with relroot %s" % (f, relroot))
2532
2529
2533 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2530 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2534 content1 = None
2531 content1 = None
2535 content2 = None
2532 content2 = None
2536 fctx1 = None
2533 fctx1 = None
2537 fctx2 = None
2534 fctx2 = None
2538 flag1 = None
2535 flag1 = None
2539 flag2 = None
2536 flag2 = None
2540 if f1:
2537 if f1:
2541 fctx1 = getfilectx(f1, ctx1)
2538 fctx1 = getfilectx(f1, ctx1)
2542 if opts.git or losedatafn:
2539 if opts.git or losedatafn:
2543 flag1 = ctx1.flags(f1)
2540 flag1 = ctx1.flags(f1)
2544 if f2:
2541 if f2:
2545 fctx2 = getfilectx(f2, ctx2)
2542 fctx2 = getfilectx(f2, ctx2)
2546 if opts.git or losedatafn:
2543 if opts.git or losedatafn:
2547 flag2 = ctx2.flags(f2)
2544 flag2 = ctx2.flags(f2)
2548 # if binary is True, output "summary" or "base85", but not "text diff"
2545 # if binary is True, output "summary" or "base85", but not "text diff"
2549 binary = not opts.text and any(f.isbinary()
2546 binary = not opts.text and any(f.isbinary()
2550 for f in [fctx1, fctx2] if f is not None)
2547 for f in [fctx1, fctx2] if f is not None)
2551
2548
2552 if losedatafn and not opts.git:
2549 if losedatafn and not opts.git:
2553 if (binary or
2550 if (binary or
2554 # copy/rename
2551 # copy/rename
2555 f2 in copy or
2552 f2 in copy or
2556 # empty file creation
2553 # empty file creation
2557 (not f1 and isempty(fctx2)) or
2554 (not f1 and isempty(fctx2)) or
2558 # empty file deletion
2555 # empty file deletion
2559 (isempty(fctx1) and not f2) or
2556 (isempty(fctx1) and not f2) or
2560 # create with flags
2557 # create with flags
2561 (not f1 and flag2) or
2558 (not f1 and flag2) or
2562 # change flags
2559 # change flags
2563 (f1 and f2 and flag1 != flag2)):
2560 (f1 and f2 and flag1 != flag2)):
2564 losedatafn(f2 or f1)
2561 losedatafn(f2 or f1)
2565
2562
2566 path1 = f1 or f2
2563 path1 = f1 or f2
2567 path2 = f2 or f1
2564 path2 = f2 or f1
2568 path1 = posixpath.join(prefix, path1[len(relroot):])
2565 path1 = posixpath.join(prefix, path1[len(relroot):])
2569 path2 = posixpath.join(prefix, path2[len(relroot):])
2566 path2 = posixpath.join(prefix, path2[len(relroot):])
2570 header = []
2567 header = []
2571 if opts.git:
2568 if opts.git:
2572 header.append('diff --git %s%s %s%s' %
2569 header.append('diff --git %s%s %s%s' %
2573 (aprefix, path1, bprefix, path2))
2570 (aprefix, path1, bprefix, path2))
2574 if not f1: # added
2571 if not f1: # added
2575 header.append('new file mode %s' % gitmode[flag2])
2572 header.append('new file mode %s' % gitmode[flag2])
2576 elif not f2: # removed
2573 elif not f2: # removed
2577 header.append('deleted file mode %s' % gitmode[flag1])
2574 header.append('deleted file mode %s' % gitmode[flag1])
2578 else: # modified/copied/renamed
2575 else: # modified/copied/renamed
2579 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2576 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2580 if mode1 != mode2:
2577 if mode1 != mode2:
2581 header.append('old mode %s' % mode1)
2578 header.append('old mode %s' % mode1)
2582 header.append('new mode %s' % mode2)
2579 header.append('new mode %s' % mode2)
2583 if copyop is not None:
2580 if copyop is not None:
2584 if opts.showsimilarity:
2581 if opts.showsimilarity:
2585 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2582 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2586 header.append('similarity index %d%%' % sim)
2583 header.append('similarity index %d%%' % sim)
2587 header.append('%s from %s' % (copyop, path1))
2584 header.append('%s from %s' % (copyop, path1))
2588 header.append('%s to %s' % (copyop, path2))
2585 header.append('%s to %s' % (copyop, path2))
2589 elif revs and not repo.ui.quiet:
2586 elif revs and not repo.ui.quiet:
2590 header.append(diffline(path1, revs))
2587 header.append(diffline(path1, revs))
2591
2588
2592 # fctx.is | diffopts | what to | is fctx.data()
2589 # fctx.is | diffopts | what to | is fctx.data()
2593 # binary() | text nobinary git index | output? | outputted?
2590 # binary() | text nobinary git index | output? | outputted?
2594 # ------------------------------------|----------------------------
2591 # ------------------------------------|----------------------------
2595 # yes | no no no * | summary | no
2592 # yes | no no no * | summary | no
2596 # yes | no no yes * | base85 | yes
2593 # yes | no no yes * | base85 | yes
2597 # yes | no yes no * | summary | no
2594 # yes | no yes no * | summary | no
2598 # yes | no yes yes 0 | summary | no
2595 # yes | no yes yes 0 | summary | no
2599 # yes | no yes yes >0 | summary | semi [1]
2596 # yes | no yes yes >0 | summary | semi [1]
2600 # yes | yes * * * | text diff | yes
2597 # yes | yes * * * | text diff | yes
2601 # no | * * * * | text diff | yes
2598 # no | * * * * | text diff | yes
2602 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2599 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2603 if binary and (not opts.git or (opts.git and opts.nobinary and not
2600 if binary and (not opts.git or (opts.git and opts.nobinary and not
2604 opts.index)):
2601 opts.index)):
2605 # fast path: no binary content will be displayed, content1 and
2602 # fast path: no binary content will be displayed, content1 and
2606 # content2 are only used for an equivalence test. cmp() could have a
2603 # content2 are only used for an equivalence test. cmp() could have a
2607 # fast path.
2604 # fast path.
2608 if fctx1 is not None:
2605 if fctx1 is not None:
2609 content1 = b'\0'
2606 content1 = b'\0'
2610 if fctx2 is not None:
2607 if fctx2 is not None:
2611 if fctx1 is not None and not fctx1.cmp(fctx2):
2608 if fctx1 is not None and not fctx1.cmp(fctx2):
2612 content2 = b'\0' # not different
2609 content2 = b'\0' # not different
2613 else:
2610 else:
2614 content2 = b'\0\0'
2611 content2 = b'\0\0'
2615 else:
2612 else:
2616 # normal path: load contents
2613 # normal path: load contents
2617 if fctx1 is not None:
2614 if fctx1 is not None:
2618 content1 = fctx1.data()
2615 content1 = fctx1.data()
2619 if fctx2 is not None:
2616 if fctx2 is not None:
2620 content2 = fctx2.data()
2617 content2 = fctx2.data()
2621
2618
2622 if binary and opts.git and not opts.nobinary:
2619 if binary and opts.git and not opts.nobinary:
2623 text = mdiff.b85diff(content1, content2)
2620 text = mdiff.b85diff(content1, content2)
2624 if text:
2621 if text:
2625 header.append('index %s..%s' %
2622 header.append('index %s..%s' %
2626 (gitindex(content1), gitindex(content2)))
2623 (gitindex(content1), gitindex(content2)))
2627 hunks = (None, [text]),
2624 hunks = (None, [text]),
2628 else:
2625 else:
2629 if opts.git and opts.index > 0:
2626 if opts.git and opts.index > 0:
2630 flag = flag1
2627 flag = flag1
2631 if flag is None:
2628 if flag is None:
2632 flag = flag2
2629 flag = flag2
2633 header.append('index %s..%s %s' %
2630 header.append('index %s..%s %s' %
2634 (gitindex(content1)[0:opts.index],
2631 (gitindex(content1)[0:opts.index],
2635 gitindex(content2)[0:opts.index],
2632 gitindex(content2)[0:opts.index],
2636 gitmode[flag]))
2633 gitmode[flag]))
2637
2634
2638 uheaders, hunks = mdiff.unidiff(content1, date1,
2635 uheaders, hunks = mdiff.unidiff(content1, date1,
2639 content2, date2,
2636 content2, date2,
2640 path1, path2, opts=opts)
2637 path1, path2, opts=opts)
2641 header.extend(uheaders)
2638 header.extend(uheaders)
2642 yield header, hunks
2639 yield header, hunks
2643
2640
2644 def diffstatsum(stats):
2641 def diffstatsum(stats):
2645 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2642 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2646 for f, a, r, b in stats:
2643 for f, a, r, b in stats:
2647 maxfile = max(maxfile, encoding.colwidth(f))
2644 maxfile = max(maxfile, encoding.colwidth(f))
2648 maxtotal = max(maxtotal, a + r)
2645 maxtotal = max(maxtotal, a + r)
2649 addtotal += a
2646 addtotal += a
2650 removetotal += r
2647 removetotal += r
2651 binary = binary or b
2648 binary = binary or b
2652
2649
2653 return maxfile, maxtotal, addtotal, removetotal, binary
2650 return maxfile, maxtotal, addtotal, removetotal, binary
2654
2651
2655 def diffstatdata(lines):
2652 def diffstatdata(lines):
2656 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2653 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2657
2654
2658 results = []
2655 results = []
2659 filename, adds, removes, isbinary = None, 0, 0, False
2656 filename, adds, removes, isbinary = None, 0, 0, False
2660
2657
2661 def addresult():
2658 def addresult():
2662 if filename:
2659 if filename:
2663 results.append((filename, adds, removes, isbinary))
2660 results.append((filename, adds, removes, isbinary))
2664
2661
2665 # inheader is used to track if a line is in the
2662 # inheader is used to track if a line is in the
2666 # header portion of the diff. This helps properly account
2663 # header portion of the diff. This helps properly account
2667 # for lines that start with '--' or '++'
2664 # for lines that start with '--' or '++'
2668 inheader = False
2665 inheader = False
2669
2666
2670 for line in lines:
2667 for line in lines:
2671 if line.startswith('diff'):
2668 if line.startswith('diff'):
2672 addresult()
2669 addresult()
2673 # starting a new file diff
2670 # starting a new file diff
2674 # set numbers to 0 and reset inheader
2671 # set numbers to 0 and reset inheader
2675 inheader = True
2672 inheader = True
2676 adds, removes, isbinary = 0, 0, False
2673 adds, removes, isbinary = 0, 0, False
2677 if line.startswith('diff --git a/'):
2674 if line.startswith('diff --git a/'):
2678 filename = gitre.search(line).group(2)
2675 filename = gitre.search(line).group(2)
2679 elif line.startswith('diff -r'):
2676 elif line.startswith('diff -r'):
2680 # format: "diff -r ... -r ... filename"
2677 # format: "diff -r ... -r ... filename"
2681 filename = diffre.search(line).group(1)
2678 filename = diffre.search(line).group(1)
2682 elif line.startswith('@@'):
2679 elif line.startswith('@@'):
2683 inheader = False
2680 inheader = False
2684 elif line.startswith('+') and not inheader:
2681 elif line.startswith('+') and not inheader:
2685 adds += 1
2682 adds += 1
2686 elif line.startswith('-') and not inheader:
2683 elif line.startswith('-') and not inheader:
2687 removes += 1
2684 removes += 1
2688 elif (line.startswith('GIT binary patch') or
2685 elif (line.startswith('GIT binary patch') or
2689 line.startswith('Binary file')):
2686 line.startswith('Binary file')):
2690 isbinary = True
2687 isbinary = True
2691 addresult()
2688 addresult()
2692 return results
2689 return results
2693
2690
2694 def diffstat(lines, width=80):
2691 def diffstat(lines, width=80):
2695 output = []
2692 output = []
2696 stats = diffstatdata(lines)
2693 stats = diffstatdata(lines)
2697 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2694 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2698
2695
2699 countwidth = len(str(maxtotal))
2696 countwidth = len(str(maxtotal))
2700 if hasbinary and countwidth < 3:
2697 if hasbinary and countwidth < 3:
2701 countwidth = 3
2698 countwidth = 3
2702 graphwidth = width - countwidth - maxname - 6
2699 graphwidth = width - countwidth - maxname - 6
2703 if graphwidth < 10:
2700 if graphwidth < 10:
2704 graphwidth = 10
2701 graphwidth = 10
2705
2702
2706 def scale(i):
2703 def scale(i):
2707 if maxtotal <= graphwidth:
2704 if maxtotal <= graphwidth:
2708 return i
2705 return i
2709 # If diffstat runs out of room it doesn't print anything,
2706 # If diffstat runs out of room it doesn't print anything,
2710 # which isn't very useful, so always print at least one + or -
2707 # which isn't very useful, so always print at least one + or -
2711 # if there were at least some changes.
2708 # if there were at least some changes.
2712 return max(i * graphwidth // maxtotal, int(bool(i)))
2709 return max(i * graphwidth // maxtotal, int(bool(i)))
2713
2710
2714 for filename, adds, removes, isbinary in stats:
2711 for filename, adds, removes, isbinary in stats:
2715 if isbinary:
2712 if isbinary:
2716 count = 'Bin'
2713 count = 'Bin'
2717 else:
2714 else:
2718 count = '%d' % (adds + removes)
2715 count = '%d' % (adds + removes)
2719 pluses = '+' * scale(adds)
2716 pluses = '+' * scale(adds)
2720 minuses = '-' * scale(removes)
2717 minuses = '-' * scale(removes)
2721 output.append(' %s%s | %*s %s%s\n' %
2718 output.append(' %s%s | %*s %s%s\n' %
2722 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2719 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2723 countwidth, count, pluses, minuses))
2720 countwidth, count, pluses, minuses))
2724
2721
2725 if stats:
2722 if stats:
2726 output.append(_(' %d files changed, %d insertions(+), '
2723 output.append(_(' %d files changed, %d insertions(+), '
2727 '%d deletions(-)\n')
2724 '%d deletions(-)\n')
2728 % (len(stats), totaladds, totalremoves))
2725 % (len(stats), totaladds, totalremoves))
2729
2726
2730 return ''.join(output)
2727 return ''.join(output)
2731
2728
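# Editor's sketch (not part of the original module): diffstat() wants an
# iterable of diff lines, so the chunks produced by diff() are joined and
# re-split here; diffstatdata() can be used directly when only the per-file
# numbers are needed. The width value is an assumption.
def _example_show_diffstat(ui, repo):
    lines = ''.join(diff(repo, opts=diffallopts(repo.ui))).splitlines(True)
    ui.write(diffstat(lines, width=70))
    # raw numbers instead of the rendered histogram:
    # for filename, adds, removes, isbinary in diffstatdata(lines):
    #     ui.write('%s: +%d -%d\n' % (filename, adds, removes))
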
2732 def diffstatui(*args, **kw):
2729 def diffstatui(*args, **kw):
2733 '''like diffstat(), but yields 2-tuples of (output, label) for
2730 '''like diffstat(), but yields 2-tuples of (output, label) for
2734 ui.write()
2731 ui.write()
2735 '''
2732 '''
2736
2733
2737 for line in diffstat(*args, **kw).splitlines():
2734 for line in diffstat(*args, **kw).splitlines():
2738 if line and line[-1] in '+-':
2735 if line and line[-1] in '+-':
2739 name, graph = line.rsplit(' ', 1)
2736 name, graph = line.rsplit(' ', 1)
2740 yield (name + ' ', '')
2737 yield (name + ' ', '')
2741 m = re.search(br'\++', graph)
2738 m = re.search(br'\++', graph)
2742 if m:
2739 if m:
2743 yield (m.group(0), 'diffstat.inserted')
2740 yield (m.group(0), 'diffstat.inserted')
2744 m = re.search(br'-+', graph)
2741 m = re.search(br'-+', graph)
2745 if m:
2742 if m:
2746 yield (m.group(0), 'diffstat.deleted')
2743 yield (m.group(0), 'diffstat.deleted')
2747 else:
2744 else:
2748 yield (line, '')
2745 yield (line, '')
2749 yield ('\n', '')
2746 yield ('\n', '')