##// END OF EJS Templates
diff: don't crash when merged-in addition is copied...
Martin von Zweigbergk -
r27902:51b6ce25 default
parent child Browse files
Show More
@@ -1,2587 +1,2592 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import cStringIO
11 import cStringIO
12 import collections
12 import collections
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 scmutil,
36 scmutil,
37 util,
37 util,
38 )
38 )
39
39
# Matches the "diff --git a/<src> b/<dst>" header line of a git-style patch;
# group 1 is the source path, group 2 the destination path.
gitre = re.compile('diff --git a/(.*) b/(.*)')
# Splits a string into alternating runs of tabs and non-tab characters.
tabsplitter = re.compile(r'(\t+|[^\t]+)')
42
42
class PatchError(Exception):
    """Error raised while parsing or applying a patch."""
    pass
45
45
46
46
47 # public functions
47 # public functions
48
48
def split(stream):
    '''return an iterator of individual patches from a stream

    The stream may hold a single plain patch, an "hg export" style
    series, an mbox, or a MIME message; the format is sniffed from the
    first recognizable line and the matching splitter generator is
    returned.  Each yielded item is a file-like object (StringIO)
    holding one patch.
    '''
    def isheader(line, inheader):
        # Heuristic test for an RFC 2822-style "Key: value" header line.
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        # a header key contains no spaces
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # package accumulated lines as a seekable file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split an "hg export" series: a new patch starts at each
        # '# HG changeset patch' line that follows a blank line
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox: messages are delimited by 'From ' lines; each
        # message body (minus the 'From ' line) is recursively re-split
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # split a MIME message: yield each text part that may hold a patch
        def msgfp(m):
            # flatten a message object back into a file-like object
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split hand-rolled header-delimited patches: a new patch starts
        # whenever a header block begins again after non-header content
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # fallback: the whole stream is one plain patch
        yield chunk(cur)

    class fiter(object):
        # minimal iterator adapter over objects that only have readline()
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the format from the leading lines, then hand the rest of the
    # stream (plus what was already consumed, in 'cur') to a splitter
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
175
175
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# extract() turns an hg export header line "# <header> <value>" into
# data[<data key>] = <value> using this table.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
182
182
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # the diff body is accumulated in a temp file whose name is returned
    # under data['filename'] (caller unlinks it)
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        data['user'] = msg['From']
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading "[PATCH n/m]"-style tag and unfold the line
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # everything before the diff start is (candidate)
                # commit message; the rest goes into the temp file
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        # parse the "# Key value" header block of hg export
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # conventional end-of-message marker before the diff
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text parts before any diff extend the message
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file before propagating any error
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing for the caller to unlink
        os.unlink(tmpname)
    return data
302
302
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep only the two interesting bits: symlink flag and owner-exec.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        dup = patchmeta(self.path)
        for attr in ('oldpath', 'mode', 'op', 'binary'):
            setattr(dup, attr, getattr(self, attr))
        return dup

    def _ispatchinga(self, afile):
        # A /dev/null source is only legitimate for an addition.
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # A /dev/null destination is only legitimate for a deletion.
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True when this metadata matches the given a/ and b/ diff paths.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
348
348
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    results = []
    current = None
    for rawline in lr:
        line = rawline.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # New file header: flush the metadata collected so far.
                if current:
                    results.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if line.startswith('--- '):
                # Hunks begin here; this file's metadata is complete.
                results.append(current)
                current = None
                continue
            if line.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = line[12:]
            elif line.startswith('rename to '):
                current.path = line[10:]
            elif line.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = line[10:]
            elif line.startswith('copy to '):
                current.path = line[8:]
            elif line.startswith('deleted file'):
                current.op = 'DELETE'
            elif line.startswith('new file mode '):
                current.op = 'ADD'
                # last six characters are the octal mode, e.g. "100644"
                current.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                current.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                current.binary = True
    if current:
        results.append(current)

    return results
392
392
class linereader(object):
    """Line-oriented reader that allows pushing lines back onto the input."""

    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, served before the underlying file
        self.buf = []

    def push(self, line):
        # None is ignored so callers may push a possibly-absent line.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        while True:
            line = self.readline()
            if not line:
                break
            yield line
416
416
class abstractbackend(object):
    """Interface for targets a patch can be applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.

        The default implementation discards the rejects.
        """

    def exists(self, fname):
        # subclasses report whether fname exists in the target
        raise NotImplementedError
448
448
class fsbackend(abstractbackend):
    """Backend reading and writing real files under a base directory."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # Symlinks: the link target is the data, never executable.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # Missing file: signalled by a (None, None) pair.
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Content untouched, only the flags change.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # Rejected hunks are saved next to the target as <fname>.rej.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
500
500
class workingbackend(fsbackend):
    """Backend applying patches to the repository working directory.

    On top of fsbackend's file operations it records which files were
    changed, removed and copied, so close() can propagate that
    bookkeeping into the dirstate.
    """
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity score forwarded to scmutil.marktouched() in close()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        # list of (copysource, destination) pairs seen via setfile()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch a file that exists on disk but is untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush recorded copies/removals to the dirstate.

        Returns the sorted list of all files touched by the patch.
        """
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
544
544
class filestore(object):
    """Store for patched file contents, kept in memory up to a budget.

    Entries at most 'maxsize' bytes in total live in a dict; anything
    beyond that spills into a temporary directory that close() removes.
    """

    def __init__(self, maxsize=None):
        self.opener = None
        # on-disk entries: fname -> (tempname, mode, copied)
        self.files = {}
        self.created = 0
        # default in-memory budget: 4 MiB
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        # in-memory entries: fname -> (data, mode, copied)
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        # a negative maxsize means "never spill to disk"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = scmutil.opener(root)
        # Plain sequential names sidestep any filename encoding issues.
        fn = str(self.created)
        self.created += 1
        self.opener.write(fn, data)
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
581
581
class repobackend(abstractbackend):
    """Backend applying patches against a changectx into a filestore.

    Nothing touches the working directory: patched contents go into
    'store' and the changed/removed/copied bookkeeping is kept on the
    instance for close() and callers to consume.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        # base changectx files are read from
        self.ctx = ctx
        # filestore receiving patched content
        self.store = store
        self.changed = set()
        self.removed = set()
        # fname -> copysource for files recorded as copies
        self.copied = {}

    def _checkknown(self, fname):
        # only files present in the base context may be patched
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # missing from the context: report as deleted/absent
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: carry the current content forward
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # every file touched by the patch, changed or removed
        return self.changed | self.removed
623
623
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: '\d', '\+' and '\*' are regex escapes, not string escapes;
# in non-raw literals they are invalid escape sequences (deprecated in
# Python 3, eventually a SyntaxError).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Supported end-of-line normalization modes for applying patches.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
628
628
class patchfile(object):
    """State and logic for applying the hunks of one patched file.

    Loads the target file (or its copy source) through ``backend``/``store``,
    applies hunks one at a time via :meth:`apply` (with offset tracking and
    optional fuzzing), and flushes results in :meth:`close`.  Rejected hunks
    are collected in ``self.rej`` and written out as a ``.rej``-style patch.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp carries per-file patch metadata: path, oldpath, mode, op
        # (presumably a patchmeta instance — defined elsewhere in this file).
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None                       # detected EOL of existing file
        self.backend = backend
        self.ui = ui
        self.lines = []                       # current file content, split lines
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath          # set when this is a copy/rename
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # Copies read their base content from the store, not the backend.
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            # For a copy the destination may legitimately not exist yet.
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                # (islink, isexec) default for newly created files.
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}        # line content -> [line numbers], for fuzzing
        self.dirty = 0
        self.offset = 0       # cumulative line delta from applied hunks
        self.skew = 0         # drift between expected and actual hunk position
        self.rej = []         # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, restoring the target EOL style."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            # Hunks were applied LF-normalized; convert back before writing.
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print 'patching file ...' once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk.

        Returns 0 on a clean apply, the fuzz level used on a fuzzy apply,
        or -1 when the hunk is rejected (it is then queued in ``self.rej``).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                            (h.number, h.desc, len(h.a), h.lena, len(h.b),
                            h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace the whole file; no fuzzing possible.
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    # Candidate positions are lines matching the hunk's first
                    # context line, nearest to the expected position first.
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush applied changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
843
843
class header(object):
    """The file-level header of a patch: 'diff ...' plus metadata lines.

    Collects the raw header lines and, later, the hunks parsed for this
    file, and answers questions about them (binary? special? which files?).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum(max(h.added, h.removed) for h in self.hunks)
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        """Return the file(s) this header concerns (source last if copied)."""
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        for line in self.header:
            if self.newfile_re.match(line):
                return True
        return False

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
915
915
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            # Context trimming is currently disabled (`if False` below can
            # never fire), so inputs are returned unchanged.
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            # The marker line is not a content line; don't count it.
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        atline = ('@@ -%d,%d +%d,%d @@%s\n' %
                  (self.fromline, fromlen, self.toline, tolen,
                   self.proc and (' ' + self.proc)))
        fp.write(atline)
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
976
976
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')
    # NOTE(review): ``operation`` is normalized here but not referenced
    # below in this function body — presumably consumed elsewhere; confirm.

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # A previous 'all'/'done' or 'file' answer short-circuits the prompt.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks the line drift caused by skipped hunks so later
        # hunks in the same file still target the right destination lines.
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # All-or-nothing headers (binary, deletions): take every hunk.
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                 skipall, msg, chunk)
            if r:
                if fixoffset:
                    # Copy before mutating: the hunk object may be shared.
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # An edited hunk replaces the original one.
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # Keep only files with at least one recorded hunk (or special headers).
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One textual hunk of a patch, in unified or context format.

    After construction, ``self.a`` holds the old-side lines (prefixed
    '-' or ' '), ``self.b`` holds the new-side lines (no prefix), and
    ``self.hunk`` keeps the raw hunk text, starting with the '@@'
    description line.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the hunk header line; num: 1-based hunk index for errors;
        # lr: linereader to parse from, or None to build an empty shell
        # (getnormalized() rebuilds such a shell manually);
        # context: true when the hunk is in context-diff format.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        # NOTE: self.hunk is shared, not copied — only a/b are normalized.
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body ('@@ -s,l +s,l @@')."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the @@ range means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk (old block, then new block) and
        convert it to the unified representation used internally."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old block: '- ' removals, '! ' changes, '  ' context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file' — trim the bogus trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk to interleave '+' lines among '-'/' ' lines
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge this new-side line into self.hunk at the right spot:
            # skip past '-' lines, stop when we meet the matching context
            # line, otherwise insert the '+' line before the cursor.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline...' marker, if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # true when both sides hold exactly the advertised number of lines
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of the old/new
        # line lists. It checks the hunk to make sure only context lines are
        # removed, and then returns the shortened lists plus the number of
        # lines dropped from the top.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to 'fuzz' context
        lines trimmed and the 0-based start offsets adjusted to match."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1349
1349
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload (bytes) once _read succeeds
        self.text = None
        # true when the payload is a delta against existing content
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # parsing either fully succeeds (text set) or raises
        return self.text is not None

    def new(self, lines):
        """Return the new file content; 'lines' is the base content when
        this hunk is a delta, and is ignored otherwise."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a git binary patch: a 'literal N'/'delta N' header
        followed by base85-encoded, zlib-compressed data lines."""
        def getline(lr, hunk):
            # read one line, keep the raw text in 'hunk', return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the size header; anything before it is skipped but kept
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded byte count of the line:
            # 'A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52 bytes
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1405
1405
def parsefilename(str):
    """Extract the file name from a '--- name<TAB|SPACE>junk' header line.

    The first four characters (the '--- ' or '+++ ' marker) are dropped
    and trailing line endings stripped. Everything after the first tab
    is discarded; when no tab is present, everything after the first
    space is discarded instead. If neither separator exists, the whole
    remainder is the name.
    """
    name = str[4:].rstrip('\r\n')
    # prefer the tab separator; fall back to a plain space
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1415
1415
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    # imported here rather than at module level to avoid a startup cost
    # and an import cycle with crecord
    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # NOTE: the sign flip mutates the hunk objects in place; the
            # returned list holds the same (now reversed) objects.
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        # headers (and anything that is not a recordhunk) pass through as-is
        newhunks.append(c)
    return newhunks
1486
1486
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current line cursors on the from/to side of the file
            self.fromline = 0
            self.toline = 0
            # function/section name from the @@ line, if any
            self.proc = ''
            # header object for the file currently being parsed
            self.header = None
            # trailing context lines not yet attached to a hunk
            self.context = []
            # context lines preceding the pending hunk
            self.before = []
            # pending +/- lines of the hunk being accumulated
            self.hunk = []
            # completed header objects, returned by finished()
            self.headers = []

        def addrange(self, limits):
            """Start a new @@ range: reset line cursors and proc name."""
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            """Flush the pending hunk (if any) using 'context' as its
            trailing context, then remember 'context' for the next hunk."""
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                # advance cursors past what the hunk consumed on each side
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            """Begin a run of +/- lines; preceding context becomes 'before'."""
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            """Flush any pending hunk and open a new file header."""
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk before handing back the headers
            self.addcontext([])
            return self.headers

        # (state, event) -> handler; missing entries are parse errors.
        # Handlers are stored unbound and called as f(parser_instance, data).
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1569
1569
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        # nothing to strip; just prepend the prefix
        return '', prefix + path.rstrip()
    end = len(path)
    cut = 0
    remaining = strip
    while remaining:
        cut = path.find('/', cut)
        if cut == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        cut += 1
        # a run of consecutive slashes counts as a single component boundary
        while cut < end - 1 and path[cut] == '/':
            cut += 1
        remaining -= 1
    # leading whitespace on the stripped part and trailing whitespace on
    # the repository path come from diff formatting and are discarded
    return path[:cut].lstrip(), prefix + path[cut:].rstrip()
1607
1607
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) hunk.

    Decides which repository file the hunk targets given the raw '---'
    and '+++' paths, the strip count and an optional prefix, and whether
    the operation is an ADD or a DELETE. Raises PatchError when neither
    a source nor a destination file can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a hunk with a 0,0 old range against /dev/null creates the file;
    # the symmetric case removes it
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # neither side exists yet; fall back to whichever path is real
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1662
1662
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not ours: put it back for the outer loop
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # a header runs until the next '---' or 'diff' line
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                # e.g. a git header with no text hunks (binary, rename-only)
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # context lines; '\' covers the no-newline-at-eof marker
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1714
1714
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable stream: remember where we are so we can rewind after
        # the metadata pre-scan
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # non-seekable stream (e.g. a pipe): buffer the remaining input
        # in memory so it can be re-read from the start (pos stays 0)
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    # firstline was already consumed by the caller; re-inject it so the
    # scanner sees the complete patch
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller can parse the same data again, hunks included
    fp.seek(pos)
    return gitpatches
1740
1740
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # git metadata entries, in reverse order so they can be pop()ed as
    # the corresponding diff sections are encountered (None until the
    # first git header is seen)
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff style unknown yet, True: context diff, False: unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (mode changes, deletions...) for
            # files that appear before the current one
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining metadata-only git entries at end of input
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1839
1839
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def varintlen(chunk):
        # number of bytes occupied by a little-endian base-128 size header
        n = 0
        for ch in chunk:
            n += 1
            if not (ord(ch) & 0x80):
                return n
        return n

    # skip the two size headers (source length, result length)
    binchunk = binchunk[varintlen(binchunk):]
    binchunk = binchunk[varintlen(binchunk):]

    pieces = []
    pos = 0
    end = len(binchunk)
    while pos < end:
        opcode = ord(binchunk[pos])
        pos += 1
        if opcode & 0x80:
            # copy command: flag bits select which offset/size bytes follow
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if opcode & bit:
                    offset |= ord(binchunk[pos]) << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if opcode & bit:
                    size |= ord(binchunk[pos]) << shift
                    pos += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif opcode != 0:
            # insert command: the next 'opcode' bytes are literal data
            pieces.append(binchunk[pos:pos + opcode])
            pos += opcode
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return "".join(pieces)
1895
1895
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic loop, using the default per-file patcher
    # factory (patchfile)
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1908
1908
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Generic patch application loop; see applydiff() for return values.

    'patcher' is a factory producing a per-file patcher object for each
    file touched by the patch.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip -p style leading components and prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file could not be opened/created; its failure was
                # already counted, skip its remaining hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # hunk applied with fuzz
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # git patch: metadata comes from the patch itself
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain diff: synthesize metadata from the filenames
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no content hunks)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any of
            # them get modified by later parts of the patch
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1996
1996
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external tool's output to collect touched files and
        # fuzz/failure reports
        # NOTE(review): assumes a 'patching file ' line always precedes
        # any 'with fuzz'/'FAILED' line; otherwise pf/printed_file would
        # be referenced before assignment -- confirm against the tool's
        # output format
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # record adds/removes/copies even if the tool failed midway
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2038
2038
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' through 'backend', adding touched paths to 'files'.

    'patchobj' may be a file path (opened here) or an open file object.
    Raises PatchError if the patch did not apply; returns whether it
    applied with fuzz.
    """
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configuration
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume an already-open file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    # applydiff() returns 1 when the patch applied with fuzz
    return ret > 0
2065
2065
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # write results straight to the working directory
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, prefix, files,
                        eolmode)
2072
2072
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of <ctx>, writing results through a
    repobackend backed by <store>. Returns whether it applied with fuzz."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2077
2077
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    tool = ui.config('ui', 'patch')
    if tool:
        # user configured an external patch program; note that prefix
        # and eolmode are not passed down on this path
        return _externalpatch(ui, repo, tool, patchname, strip, files,
                              similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2099
2099
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths the patch at 'patchpath' touches,
    without applying it. Renames contribute both old and new paths."""
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: metadata comes from the patch itself
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    # plain diff: synthesize metadata from the filenames
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2120
2120
class GitDiffRequired(Exception):
    """Signals that the git diff format is required."""
2123
2123
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# backwards-compatible alias
diffopts = diffallopts
2130
2130
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
    with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # precedence: explicit command-line opts, then the HGPLAIN
        # override (when forceplain is set), then the config file
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        # these are disabled under HGPLAIN to keep output machine-parsable
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2171
2171
2172 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2172 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2173 losedatafn=None, prefix='', relroot=''):
2173 losedatafn=None, prefix='', relroot=''):
2174 '''yields diff of changes to files between two nodes, or node and
2174 '''yields diff of changes to files between two nodes, or node and
2175 working directory.
2175 working directory.
2176
2176
2177 if node1 is None, use first dirstate parent instead.
2177 if node1 is None, use first dirstate parent instead.
2178 if node2 is None, compare node1 with working directory.
2178 if node2 is None, compare node1 with working directory.
2179
2179
2180 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2180 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2181 every time some change cannot be represented with the current
2181 every time some change cannot be represented with the current
2182 patch format. Return False to upgrade to git patch format, True to
2182 patch format. Return False to upgrade to git patch format, True to
2183 accept the loss or raise an exception to abort the diff. It is
2183 accept the loss or raise an exception to abort the diff. It is
2184 called with the name of current file being diffed as 'fn'. If set
2184 called with the name of current file being diffed as 'fn'. If set
2185 to None, patches will always be upgraded to git format when
2185 to None, patches will always be upgraded to git format when
2186 necessary.
2186 necessary.
2187
2187
2188 prefix is a filename prefix that is prepended to all filenames on
2188 prefix is a filename prefix that is prepended to all filenames on
2189 display (used for subrepos).
2189 display (used for subrepos).
2190
2190
2191 relroot, if not empty, must be normalized with a trailing /. Any match
2191 relroot, if not empty, must be normalized with a trailing /. Any match
2192 patterns that fall outside it will be ignored.'''
2192 patterns that fall outside it will be ignored.'''
2193
2193
2194 if opts is None:
2194 if opts is None:
2195 opts = mdiff.defaultopts
2195 opts = mdiff.defaultopts
2196
2196
2197 if not node1 and not node2:
2197 if not node1 and not node2:
2198 node1 = repo.dirstate.p1()
2198 node1 = repo.dirstate.p1()
2199
2199
2200 def lrugetfilectx():
2200 def lrugetfilectx():
2201 cache = {}
2201 cache = {}
2202 order = collections.deque()
2202 order = collections.deque()
2203 def getfilectx(f, ctx):
2203 def getfilectx(f, ctx):
2204 fctx = ctx.filectx(f, filelog=cache.get(f))
2204 fctx = ctx.filectx(f, filelog=cache.get(f))
2205 if f not in cache:
2205 if f not in cache:
2206 if len(cache) > 20:
2206 if len(cache) > 20:
2207 del cache[order.popleft()]
2207 del cache[order.popleft()]
2208 cache[f] = fctx.filelog()
2208 cache[f] = fctx.filelog()
2209 else:
2209 else:
2210 order.remove(f)
2210 order.remove(f)
2211 order.append(f)
2211 order.append(f)
2212 return fctx
2212 return fctx
2213 return getfilectx
2213 return getfilectx
2214 getfilectx = lrugetfilectx()
2214 getfilectx = lrugetfilectx()
2215
2215
2216 ctx1 = repo[node1]
2216 ctx1 = repo[node1]
2217 ctx2 = repo[node2]
2217 ctx2 = repo[node2]
2218
2218
2219 relfiltered = False
2219 relfiltered = False
2220 if relroot != '' and match.always():
2220 if relroot != '' and match.always():
2221 # as a special case, create a new matcher with just the relroot
2221 # as a special case, create a new matcher with just the relroot
2222 pats = [relroot]
2222 pats = [relroot]
2223 match = scmutil.match(ctx2, pats, default='path')
2223 match = scmutil.match(ctx2, pats, default='path')
2224 relfiltered = True
2224 relfiltered = True
2225
2225
2226 if not changes:
2226 if not changes:
2227 changes = repo.status(ctx1, ctx2, match=match)
2227 changes = repo.status(ctx1, ctx2, match=match)
2228 modified, added, removed = changes[:3]
2228 modified, added, removed = changes[:3]
2229
2229
2230 if not modified and not added and not removed:
2230 if not modified and not added and not removed:
2231 return []
2231 return []
2232
2232
2233 if repo.ui.debugflag:
2233 if repo.ui.debugflag:
2234 hexfunc = hex
2234 hexfunc = hex
2235 else:
2235 else:
2236 hexfunc = short
2236 hexfunc = short
2237 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2237 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2238
2238
2239 copy = {}
2239 copy = {}
2240 if opts.git or opts.upgrade:
2240 if opts.git or opts.upgrade:
2241 copy = copies.pathcopies(ctx1, ctx2, match=match)
2241 copy = copies.pathcopies(ctx1, ctx2, match=match)
2242
2242
2243 if relroot is not None:
2243 if relroot is not None:
2244 if not relfiltered:
2244 if not relfiltered:
2245 # XXX this would ideally be done in the matcher, but that is
2245 # XXX this would ideally be done in the matcher, but that is
2246 # generally meant to 'or' patterns, not 'and' them. In this case we
2246 # generally meant to 'or' patterns, not 'and' them. In this case we
2247 # need to 'and' all the patterns from the matcher with relroot.
2247 # need to 'and' all the patterns from the matcher with relroot.
2248 def filterrel(l):
2248 def filterrel(l):
2249 return [f for f in l if f.startswith(relroot)]
2249 return [f for f in l if f.startswith(relroot)]
2250 modified = filterrel(modified)
2250 modified = filterrel(modified)
2251 added = filterrel(added)
2251 added = filterrel(added)
2252 removed = filterrel(removed)
2252 removed = filterrel(removed)
2253 relfiltered = True
2253 relfiltered = True
2254 # filter out copies where either side isn't inside the relative root
2254 # filter out copies where either side isn't inside the relative root
2255 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2255 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2256 if dst.startswith(relroot)
2256 if dst.startswith(relroot)
2257 and src.startswith(relroot)))
2257 and src.startswith(relroot)))
2258
2258
2259 modifiedset = set(modified)
2259 modifiedset = set(modified)
2260 addedset = set(added)
2260 addedset = set(added)
2261 removedset = set(removed)
2261 removedset = set(removed)
2262 for f in modified:
2262 for f in modified:
2263 if f not in ctx1:
2263 if f not in ctx1:
2264 # Fix up added, since merged-in additions appear as
2264 # Fix up added, since merged-in additions appear as
2265 # modifications during merges
2265 # modifications during merges
2266 modifiedset.remove(f)
2266 modifiedset.remove(f)
2267 addedset.add(f)
2267 addedset.add(f)
2268 for f in removed:
2268 for f in removed:
2269 if f not in ctx1:
2269 if f not in ctx1:
2270 # Merged-in additions that are then removed are reported as removed.
2270 # Merged-in additions that are then removed are reported as removed.
2271 # They are not in ctx1, so We don't want to show them in the diff.
2271 # They are not in ctx1, so We don't want to show them in the diff.
2272 removedset.remove(f)
2272 removedset.remove(f)
2273 modified = sorted(modifiedset)
2273 modified = sorted(modifiedset)
2274 added = sorted(addedset)
2274 added = sorted(addedset)
2275 removed = sorted(removedset)
2275 removed = sorted(removedset)
2276 for dst, src in copy.items():
2277 if src not in ctx1:
2278 # Files merged in during a merge and then copied/renamed are
2279 # reported as copies. We want to show them in the diff as additions.
2280 del copy[dst]
2276
2281
2277 def difffn(opts, losedata):
2282 def difffn(opts, losedata):
2278 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2283 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2279 copy, getfilectx, opts, losedata, prefix, relroot)
2284 copy, getfilectx, opts, losedata, prefix, relroot)
2280 if opts.upgrade and not opts.git:
2285 if opts.upgrade and not opts.git:
2281 try:
2286 try:
2282 def losedata(fn):
2287 def losedata(fn):
2283 if not losedatafn or not losedatafn(fn=fn):
2288 if not losedatafn or not losedatafn(fn=fn):
2284 raise GitDiffRequired
2289 raise GitDiffRequired
2285 # Buffer the whole output until we are sure it can be generated
2290 # Buffer the whole output until we are sure it can be generated
2286 return list(difffn(opts.copy(git=False), losedata))
2291 return list(difffn(opts.copy(git=False), losedata))
2287 except GitDiffRequired:
2292 except GitDiffRequired:
2288 return difffn(opts.copy(git=True), None)
2293 return difffn(opts.copy(git=True), None)
2289 else:
2294 else:
2290 return difffn(opts, None)
2295 return difffn(opts, None)
2291
2296
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # Prefixes identifying file-header lines and the ui label to use for
    # each; checked in order, so the first matching prefix wins.
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # Prefixes for lines inside a hunk body.
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # True while we are inside a file header block (between a 'diff ...'
    # line and the first '@@' hunk line).
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline that split() consumed
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    # first hunk line ends the header section
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    # anything that is not hunk content starts a new header
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split changed lines into runs of tabs vs. other
                        # text so tabs can get their own label
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                # emit the whitespace removed by rstrip() with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2343
2348
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel, which colorizes the raw diff() output
    labeled = difflabel(diff, *args, **kw)
    return labeled
2347
2352
2348 def _filepairs(modified, added, removed, copy, opts):
2353 def _filepairs(modified, added, removed, copy, opts):
2349 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2354 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2350 before and f2 is the the name after. For added files, f1 will be None,
2355 before and f2 is the the name after. For added files, f1 will be None,
2351 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2356 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2352 or 'rename' (the latter two only if opts.git is set).'''
2357 or 'rename' (the latter two only if opts.git is set).'''
2353 gone = set()
2358 gone = set()
2354
2359
2355 copyto = dict([(v, k) for k, v in copy.items()])
2360 copyto = dict([(v, k) for k, v in copy.items()])
2356
2361
2357 addedset, removedset = set(added), set(removed)
2362 addedset, removedset = set(added), set(removed)
2358
2363
2359 for f in sorted(modified + added + removed):
2364 for f in sorted(modified + added + removed):
2360 copyop = None
2365 copyop = None
2361 f1, f2 = f, f
2366 f1, f2 = f, f
2362 if f in addedset:
2367 if f in addedset:
2363 f1 = None
2368 f1 = None
2364 if f in copy:
2369 if f in copy:
2365 if opts.git:
2370 if opts.git:
2366 f1 = copy[f]
2371 f1 = copy[f]
2367 if f1 in removedset and f1 not in gone:
2372 if f1 in removedset and f1 not in gone:
2368 copyop = 'rename'
2373 copyop = 'rename'
2369 gone.add(f1)
2374 gone.add(f1)
2370 else:
2375 else:
2371 copyop = 'copy'
2376 copyop = 'copy'
2372 elif f in removedset:
2377 elif f in removedset:
2373 f2 = None
2378 f2 = None
2374 if opts.git:
2379 if opts.git:
2375 # have we already reported a copy above?
2380 # have we already reported a copy above?
2376 if (f in copyto and copyto[f] in addedset
2381 if (f in copyto and copyto[f] in addedset
2377 and copy[copyto[f]] == f):
2382 and copy[copyto[f]] == f):
2378 continue
2383 continue
2379 yield f1, f2, copyop
2384 yield f1, f2, copyop
2380
2385
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # hex SHA-1 of the git blob object for `text` (git's object-id
        # scheme: hash of 'blob <len>\0' followed by the content)
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # traditional (non-git) header line: 'diff -r REV [-r REV] file'
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # symlink / executable / regular file -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: with a relroot every path must live under it
    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        # f1/content1/flag1 describe the pre-image, f2/content2/flag2 the
        # post-image; either side may be None (added/removed files)
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        # plain patches cannot express these cases; give the caller a
        # chance to upgrade to git format (or accept the data loss)
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # fall back to the surviving name for the missing side, then
        # rewrite both paths from relroot-relative to prefix-relative
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            # base85 binary diff; the index line is needed to apply it
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # emit the header only if there is a body or extended header info
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2490
2495
def diffstatsum(stats):
    '''summarize diffstat tuples: returns (maxfile, maxtotal, addtotal,
    removetotal, binary) aggregated over (file, adds, removes, isbinary)
    entries'''
    maxname = 0
    maxchanges = 0
    totaladds = 0
    totalremoves = 0
    hasbinary = False
    for filename, adds, removes, isbinary in stats:
        # widest display width of any filename (for column alignment)
        width = encoding.colwidth(filename)
        if width > maxname:
            maxname = width
        changes = adds + removes
        if changes > maxchanges:
            maxchanges = changes
        totaladds += adds
        totalremoves += removes
        if isbinary:
            hasbinary = True
    return maxname, maxchanges, totaladds, totalremoves, hasbinary
2501
2506
def diffstatdata(lines):
    '''parse diff output lines into diffstat data

    Returns a list of (filename, adds, removes, isbinary) tuples, one per
    file seen in the diff. Handles both git-style ('diff --git a/...') and
    traditional ('diff -r ...') headers.
    '''
    # raw string so \s is an explicit regex token instead of relying on
    # Python passing an unrecognized string escape through unchanged
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters for the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the last file, which has no following 'diff' line
    addresult()
    return results
2531
2536
def diffstat(lines, width=80, git=False):
    '''render diff output lines as a diffstat histogram, one row per file
    plus a trailing totals line'''
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # the 'Bin' marker needs at least three columns
        countwidth = max(countwidth, 3)
    # remaining room for the +/- histogram after name and count columns,
    # but never squeeze it below a useful minimum
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding,
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2569
2574
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        # histogram rows end in '+' or '-'; anything else is plain text
        if not line or line[-1] not in '+-':
            yield (line, '')
        else:
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(r'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(r'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        yield ('\n', '')
@@ -1,70 +1,77 b''
1 $ hg init
1 $ hg init
2 $ touch a
2 $ touch a
3 $ hg add a
3 $ hg add a
4 $ hg ci -m "a"
4 $ hg ci -m "a"
5
5
6 $ echo 123 > b
6 $ echo 123 > b
7 $ hg add b
7 $ hg add b
8 $ hg diff --nodates
8 $ hg diff --nodates
9 diff -r 3903775176ed b
9 diff -r 3903775176ed b
10 --- /dev/null
10 --- /dev/null
11 +++ b/b
11 +++ b/b
12 @@ -0,0 +1,1 @@
12 @@ -0,0 +1,1 @@
13 +123
13 +123
14
14
15 $ hg diff --nodates -r tip
15 $ hg diff --nodates -r tip
16 diff -r 3903775176ed b
16 diff -r 3903775176ed b
17 --- /dev/null
17 --- /dev/null
18 +++ b/b
18 +++ b/b
19 @@ -0,0 +1,1 @@
19 @@ -0,0 +1,1 @@
20 +123
20 +123
21
21
22 $ echo foo > a
22 $ echo foo > a
23 $ hg diff --nodates
23 $ hg diff --nodates
24 diff -r 3903775176ed a
24 diff -r 3903775176ed a
25 --- a/a
25 --- a/a
26 +++ b/a
26 +++ b/a
27 @@ -0,0 +1,1 @@
27 @@ -0,0 +1,1 @@
28 +foo
28 +foo
29 diff -r 3903775176ed b
29 diff -r 3903775176ed b
30 --- /dev/null
30 --- /dev/null
31 +++ b/b
31 +++ b/b
32 @@ -0,0 +1,1 @@
32 @@ -0,0 +1,1 @@
33 +123
33 +123
34
34
35 $ hg diff -r ""
35 $ hg diff -r ""
36 hg: parse error: empty query
36 hg: parse error: empty query
37 [255]
37 [255]
38 $ hg diff -r tip -r ""
38 $ hg diff -r tip -r ""
39 hg: parse error: empty query
39 hg: parse error: empty query
40 [255]
40 [255]
41
41
42 Remove a file that was added via merge. Since the file is not in parent 1,
42 Remove a file that was added via merge. Since the file is not in parent 1,
43 it should not be in the diff.
43 it should not be in the diff.
44
44
45 $ hg ci -m 'a=foo' a
45 $ hg ci -m 'a=foo' a
46 $ hg co -Cq null
46 $ hg co -Cq null
47 $ echo 123 > b
47 $ echo 123 > b
48 $ hg add b
48 $ hg add b
49 $ hg ci -m "b"
49 $ hg ci -m "b"
50 created new head
50 created new head
51 $ hg merge 1
51 $ hg merge 1
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 (branch merge, don't forget to commit)
53 (branch merge, don't forget to commit)
54 $ hg rm -f a
54 $ hg rm -f a
55 $ hg diff --nodates
55 $ hg diff --nodates
56
56
57 Rename a file that was added via merge. Since the rename source is not in
57 Rename a file that was added via merge. Since the rename source is not in
58 parent 1, the diff should be relative to /dev/null
58 parent 1, the diff should be relative to /dev/null
59
59
60 $ hg co -Cq 2
60 $ hg co -Cq 2
61 $ hg merge 1
61 $ hg merge 1
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 (branch merge, don't forget to commit)
63 (branch merge, don't forget to commit)
64 $ hg mv a a2
64 $ hg mv a a2
65 $ hg diff --nodates
65 $ hg diff --nodates
66 diff -r cf44b38435e5 a2
66 diff -r cf44b38435e5 a2
67 --- /dev/null
67 --- /dev/null
68 +++ b/a2
68 +++ b/a2
69 @@ -0,0 +1,1 @@
69 @@ -0,0 +1,1 @@
70 +foo
70 +foo
71 $ hg diff --nodates --git
72 diff --git a/a2 b/a2
73 new file mode 100644
74 --- /dev/null
75 +++ b/a2
76 @@ -0,0 +1,1 @@
77 +foo
General Comments 0
You need to be logged in to leave comments. Login now