diffstat: support filenames with whitespaces on renames...
Navaneeth Suresh
r41446:4a33a6bf default
@@ -1,2870 +1,2870 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    diffhelper,
    diffutil,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError

# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

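# Illustrative sketch, not part of the original module: a minimal example of
# how split() might be driven, assuming a file-like object opened in binary
# mode; the helper name is hypothetical.
def _split_usage_sketch(fp):
    # each piece yielded by split() is a stringio holding one whole patch
    for i, piece in enumerate(split(fp)):
        firstline = piece.readline()
        print('patch %d begins: %r' % (i, firstline))
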
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        tmpfp.close()
        os.unlink(tmpname)

def _extract(ui, fileobj, tmpname, tmpfp):

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        if subject.startswith('[PATCH'):
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data

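# Illustrative sketch, not part of the original module: extract() is a
# context manager, so the temporary file it writes the diff into is removed
# automatically on exit. The ui object and patch path are assumed to come
# from the caller; the helper name is hypothetical.
def _extract_usage_sketch(ui, patchpath):
    with open(patchpath, 'rb') as fileobj:
        with extract(ui, fileobj) as data:
            if 'filename' not in data:
                ui.warn('no diff found in %s\n' % patchpath)
            else:
                # 'filename' names a temporary file holding the diff; it is
                # unlinked when the with-block exits, so use it in here.
                ui.status('patch from %s stored at %s\n'
                          % (data.get('user', 'unknown'), data['filename']))
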
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

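# Illustrative sketch, not part of the original module: readgitpatch()
# accepts any iterable of lines, so a small in-memory git header is enough to
# show the metadata collected for a rename whose filenames contain spaces.
def _readgitpatch_sketch():
    lines = [
        'diff --git a/old name.txt b/new name.txt\n',
        'rename from old name.txt\n',
        'rename to new name.txt\n',
    ]
    gitpatches = readgitpatch(lines)
    # expected: one patchmeta with op 'RENAME', oldpath 'old name.txt' and
    # path 'new name.txt'
    return [(gp.op, gp.oldpath, gp.path) for gp in gitpatches]
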
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

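# Illustrative sketch, not part of the original module: filestore keeps small
# file contents in memory and spills anything past maxsize into a temporary
# directory, which close() removes again. The helper name and the tiny limit
# are made up for illustration.
def _filestore_sketch():
    store = filestore(maxsize=16)
    store.setfile('small', 'abc', (False, False))      # kept in self.data
    store.setfile('large', 'x' * 64, (False, False))   # spilled to disk
    try:
        data, mode, copied = store.getfile('large')
        assert data == 'x' * 64
    finally:
        store.close()                                   # removes the tempdir
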
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

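# Illustrative sketch, not part of the original module: unidesc captures the
# four start/length fields of a unified hunk header; the length groups are
# None when the short "@@ -start +start @@" form is used.
def _unidesc_sketch():
    m = unidesc.match('@@ -12,5 +12,6 @@')
    assert m.groups() == ('12', '5', '12', '6')
    m = unidesc.match('@@ -1 +1 @@')
    assert m.groups() == ('1', None, '1', None)
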
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

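# Illustrative sketch, not part of the original module: the candidate
# ordering used by patchfile.findlines() is a plain sort by distance from the
# expected position, so nearer occurrences of a context line are tried first.
def _findlines_order_sketch():
    linenum = 40
    cand = [10, 38, 90, 41]
    cand.sort(key=lambda x: abs(x - linenum))
    assert cand == [41, 38, 10, 90]
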
860 class header(object):
860 class header(object):
861 """patch header
861 """patch header
862 """
862 """
863 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
863 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
864 diff_re = re.compile('diff -r .* (.*)$')
864 diff_re = re.compile('diff -r .* (.*)$')
865 allhunks_re = re.compile('(?:index|deleted file) ')
865 allhunks_re = re.compile('(?:index|deleted file) ')
866 pretty_re = re.compile('(?:new file|deleted file) ')
866 pretty_re = re.compile('(?:new file|deleted file) ')
867 special_re = re.compile('(?:index|deleted|copy|rename) ')
867 special_re = re.compile('(?:index|deleted|copy|rename) ')
868 newfile_re = re.compile('(?:new file)')
868 newfile_re = re.compile('(?:new file)')
869
869
870 def __init__(self, header):
870 def __init__(self, header):
871 self.header = header
871 self.header = header
872 self.hunks = []
872 self.hunks = []
873
873
874 def binary(self):
874 def binary(self):
875 return any(h.startswith('index ') for h in self.header)
875 return any(h.startswith('index ') for h in self.header)
876
876
877 def pretty(self, fp):
877 def pretty(self, fp):
878 for h in self.header:
878 for h in self.header:
879 if h.startswith('index '):
879 if h.startswith('index '):
880 fp.write(_('this modifies a binary file (all or nothing)\n'))
880 fp.write(_('this modifies a binary file (all or nothing)\n'))
881 break
881 break
882 if self.pretty_re.match(h):
882 if self.pretty_re.match(h):
883 fp.write(h)
883 fp.write(h)
884 if self.binary():
884 if self.binary():
885 fp.write(_('this is a binary file\n'))
885 fp.write(_('this is a binary file\n'))
886 break
886 break
887 if h.startswith('---'):
887 if h.startswith('---'):
888 fp.write(_('%d hunks, %d lines changed\n') %
888 fp.write(_('%d hunks, %d lines changed\n') %
889 (len(self.hunks),
889 (len(self.hunks),
890 sum([max(h.added, h.removed) for h in self.hunks])))
890 sum([max(h.added, h.removed) for h in self.hunks])))
891 break
891 break
892 fp.write(h)
892 fp.write(h)
893
893
894 def write(self, fp):
894 def write(self, fp):
895 fp.write(''.join(self.header))
895 fp.write(''.join(self.header))
896
896
897 def allhunks(self):
897 def allhunks(self):
898 return any(self.allhunks_re.match(h) for h in self.header)
898 return any(self.allhunks_re.match(h) for h in self.header)
899
899
900 def files(self):
900 def files(self):
901 match = self.diffgit_re.match(self.header[0])
901 match = self.diffgit_re.match(self.header[0])
902 if match:
902 if match:
903 fromfile, tofile = match.groups()
903 fromfile, tofile = match.groups()
904 if fromfile == tofile:
904 if fromfile == tofile:
905 return [fromfile]
905 return [fromfile]
906 return [fromfile, tofile]
906 return [fromfile, tofile]
907 else:
907 else:
908 return self.diff_re.match(self.header[0]).groups()
908 return self.diff_re.match(self.header[0]).groups()
909
909
910 def filename(self):
910 def filename(self):
911 return self.files()[-1]
911 return self.files()[-1]
912
912
913 def __repr__(self):
913 def __repr__(self):
914 return '<header %s>' % (' '.join(map(repr, self.files())))
914 return '<header %s>' % (' '.join(map(repr, self.files())))
915
915
916 def isnewfile(self):
916 def isnewfile(self):
917 return any(self.newfile_re.match(h) for h in self.header)
917 return any(self.newfile_re.match(h) for h in self.header)
918
918
919 def special(self):
919 def special(self):
920 # Special files are shown only at the header level and not at the hunk
920 # Special files are shown only at the header level and not at the hunk
921 # level for example a file that has been deleted is a special file.
921 # level for example a file that has been deleted is a special file.
922 # The user cannot change the content of the operation, in the case of
922 # The user cannot change the content of the operation, in the case of
923 # the deleted file he has to take the deletion or not take it, he
923 # the deleted file he has to take the deletion or not take it, he
924 # cannot take some of it.
924 # cannot take some of it.
925 # Newly added files are special if they are empty, they are not special
925 # Newly added files are special if they are empty, they are not special
926 # if they have some content as we want to be able to change it
926 # if they have some content as we want to be able to change it
927 nocontent = len(self.header) == 2
927 nocontent = len(self.header) == 2
928 emptynewfile = self.isnewfile() and nocontent
928 emptynewfile = self.isnewfile() and nocontent
929 return emptynewfile or \
929 return emptynewfile or \
930 any(self.special_re.match(h) for h in self.header)
930 any(self.special_re.match(h) for h in self.header)
931
931
932 class recordhunk(object):
932 class recordhunk(object):
933 """patch hunk
933 """patch hunk
934
934
935 XXX shouldn't we merge this with the other hunk class?
935 XXX shouldn't we merge this with the other hunk class?
936 """
936 """
937
937
938 def __init__(self, header, fromline, toline, proc, before, hunk, after,
938 def __init__(self, header, fromline, toline, proc, before, hunk, after,
939 maxcontext=None):
939 maxcontext=None):
940 def trimcontext(lines, reverse=False):
940 def trimcontext(lines, reverse=False):
941 if maxcontext is not None:
941 if maxcontext is not None:
942 delta = len(lines) - maxcontext
942 delta = len(lines) - maxcontext
943 if delta > 0:
943 if delta > 0:
944 if reverse:
944 if reverse:
945 return delta, lines[delta:]
945 return delta, lines[delta:]
946 else:
946 else:
947 return delta, lines[:maxcontext]
947 return delta, lines[:maxcontext]
948 return 0, lines
948 return 0, lines
949
949
950 self.header = header
950 self.header = header
951 trimedbefore, self.before = trimcontext(before, True)
951 trimedbefore, self.before = trimcontext(before, True)
952 self.fromline = fromline + trimedbefore
952 self.fromline = fromline + trimedbefore
953 self.toline = toline + trimedbefore
953 self.toline = toline + trimedbefore
954 _trimedafter, self.after = trimcontext(after, False)
954 _trimedafter, self.after = trimcontext(after, False)
955 self.proc = proc
955 self.proc = proc
956 self.hunk = hunk
956 self.hunk = hunk
957 self.added, self.removed = self.countchanges(self.hunk)
957 self.added, self.removed = self.countchanges(self.hunk)
958
958
959 def __eq__(self, v):
959 def __eq__(self, v):
960 if not isinstance(v, recordhunk):
960 if not isinstance(v, recordhunk):
961 return False
961 return False
962
962
963 return ((v.hunk == self.hunk) and
963 return ((v.hunk == self.hunk) and
964 (v.proc == self.proc) and
964 (v.proc == self.proc) and
965 (self.fromline == v.fromline) and
965 (self.fromline == v.fromline) and
966 (self.header.files() == v.header.files()))
966 (self.header.files() == v.header.files()))
967
967
968 def __hash__(self):
968 def __hash__(self):
969 return hash((tuple(self.hunk),
969 return hash((tuple(self.hunk),
970 tuple(self.header.files()),
970 tuple(self.header.files()),
971 self.fromline,
971 self.fromline,
972 self.proc))
972 self.proc))
973
973
974 def countchanges(self, hunk):
974 def countchanges(self, hunk):
975 """hunk -> (n+,n-)"""
975 """hunk -> (n+,n-)"""
976 add = len([h for h in hunk if h.startswith('+')])
976 add = len([h for h in hunk if h.startswith('+')])
977 rem = len([h for h in hunk if h.startswith('-')])
977 rem = len([h for h in hunk if h.startswith('-')])
978 return add, rem
978 return add, rem
979
979
980 def reversehunk(self):
980 def reversehunk(self):
981 """return another recordhunk which is the reverse of the hunk
981 """return another recordhunk which is the reverse of the hunk
982
982
983 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
983 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
984 that, swap fromline/toline and +/- signs while keep other things
984 that, swap fromline/toline and +/- signs while keep other things
985 unchanged.
985 unchanged.
986 """
986 """
987 m = {'+': '-', '-': '+', '\\': '\\'}
987 m = {'+': '-', '-': '+', '\\': '\\'}
988 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
988 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
989 return recordhunk(self.header, self.toline, self.fromline, self.proc,
989 return recordhunk(self.header, self.toline, self.fromline, self.proc,
990 self.before, hunk, self.after)
990 self.before, hunk, self.after)
991
991
992 def write(self, fp):
992 def write(self, fp):
993 delta = len(self.before) + len(self.after)
993 delta = len(self.before) + len(self.after)
994 if self.after and self.after[-1] == '\\ No newline at end of file\n':
994 if self.after and self.after[-1] == '\\ No newline at end of file\n':
995 delta -= 1
995 delta -= 1
996 fromlen = delta + self.removed
996 fromlen = delta + self.removed
997 tolen = delta + self.added
997 tolen = delta + self.added
998 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
998 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
999 (self.fromline, fromlen, self.toline, tolen,
999 (self.fromline, fromlen, self.toline, tolen,
1000 self.proc and (' ' + self.proc)))
1000 self.proc and (' ' + self.proc)))
1001 fp.write(''.join(self.before + self.hunk + self.after))
1001 fp.write(''.join(self.before + self.hunk + self.after))
1002
1002
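# Editor's note, a worked example of the header arithmetic above (nothing new,
# just the numbers spelled out): a hunk carrying 3 lines of leading and 2
# lines of trailing context, 2 removed lines and 1 added line gives
#
#   >>> before, after, removed, added = 3, 2, 2, 1
#   >>> delta = before + after
#   >>> delta + removed, delta + added
#   (7, 6)
#
# i.e. a header of the form '@@ -<fromline>,7 +<toline>,6 @@'. A trailing
# '\ No newline at end of file' marker is not a real context line, which is
# why it is subtracted from delta before the lengths are computed.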
1003 pretty = write
1003 pretty = write
1004
1004
1005 def filename(self):
1005 def filename(self):
1006 return self.header.filename()
1006 return self.header.filename()
1007
1007
1008 def __repr__(self):
1008 def __repr__(self):
1009 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1009 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1010
1010
1011 def getmessages():
1011 def getmessages():
1012 return {
1012 return {
1013 'multiple': {
1013 'multiple': {
1014 'apply': _("apply change %d/%d to '%s'?"),
1014 'apply': _("apply change %d/%d to '%s'?"),
1015 'discard': _("discard change %d/%d to '%s'?"),
1015 'discard': _("discard change %d/%d to '%s'?"),
1016 'record': _("record change %d/%d to '%s'?"),
1016 'record': _("record change %d/%d to '%s'?"),
1017 },
1017 },
1018 'single': {
1018 'single': {
1019 'apply': _("apply this change to '%s'?"),
1019 'apply': _("apply this change to '%s'?"),
1020 'discard': _("discard this change to '%s'?"),
1020 'discard': _("discard this change to '%s'?"),
1021 'record': _("record this change to '%s'?"),
1021 'record': _("record this change to '%s'?"),
1022 },
1022 },
1023 'help': {
1023 'help': {
1024 'apply': _('[Ynesfdaq?]'
1024 'apply': _('[Ynesfdaq?]'
1025 '$$ &Yes, apply this change'
1025 '$$ &Yes, apply this change'
1026 '$$ &No, skip this change'
1026 '$$ &No, skip this change'
1027 '$$ &Edit this change manually'
1027 '$$ &Edit this change manually'
1028 '$$ &Skip remaining changes to this file'
1028 '$$ &Skip remaining changes to this file'
1029 '$$ Apply remaining changes to this &file'
1029 '$$ Apply remaining changes to this &file'
1030 '$$ &Done, skip remaining changes and files'
1030 '$$ &Done, skip remaining changes and files'
1031 '$$ Apply &all changes to all remaining files'
1031 '$$ Apply &all changes to all remaining files'
1032 '$$ &Quit, applying no changes'
1032 '$$ &Quit, applying no changes'
1033 '$$ &? (display help)'),
1033 '$$ &? (display help)'),
1034 'discard': _('[Ynesfdaq?]'
1034 'discard': _('[Ynesfdaq?]'
1035 '$$ &Yes, discard this change'
1035 '$$ &Yes, discard this change'
1036 '$$ &No, skip this change'
1036 '$$ &No, skip this change'
1037 '$$ &Edit this change manually'
1037 '$$ &Edit this change manually'
1038 '$$ &Skip remaining changes to this file'
1038 '$$ &Skip remaining changes to this file'
1039 '$$ Discard remaining changes to this &file'
1039 '$$ Discard remaining changes to this &file'
1040 '$$ &Done, skip remaining changes and files'
1040 '$$ &Done, skip remaining changes and files'
1041 '$$ Discard &all changes to all remaining files'
1041 '$$ Discard &all changes to all remaining files'
1042 '$$ &Quit, discarding no changes'
1042 '$$ &Quit, discarding no changes'
1043 '$$ &? (display help)'),
1043 '$$ &? (display help)'),
1044 'record': _('[Ynesfdaq?]'
1044 'record': _('[Ynesfdaq?]'
1045 '$$ &Yes, record this change'
1045 '$$ &Yes, record this change'
1046 '$$ &No, skip this change'
1046 '$$ &No, skip this change'
1047 '$$ &Edit this change manually'
1047 '$$ &Edit this change manually'
1048 '$$ &Skip remaining changes to this file'
1048 '$$ &Skip remaining changes to this file'
1049 '$$ Record remaining changes to this &file'
1049 '$$ Record remaining changes to this &file'
1050 '$$ &Done, skip remaining changes and files'
1050 '$$ &Done, skip remaining changes and files'
1051 '$$ Record &all changes to all remaining files'
1051 '$$ Record &all changes to all remaining files'
1052 '$$ &Quit, recording no changes'
1052 '$$ &Quit, recording no changes'
1053 '$$ &? (display help)'),
1053 '$$ &? (display help)'),
1054 }
1054 }
1055 }
1055 }
1056
1056
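# Editor's note on the strings above (a sketch of the convention, not new
# behaviour): ui.promptchoice() takes the question and the choices joined by
# '$$', with '&' marking the key that selects each choice. A stripped-down
# split of such a string looks like:
#
#   >>> resps = "[Yn?]$$ &Yes, apply$$ &No, skip$$ &? (display help)"
#   >>> [c.strip() for c in resps.split('$$')[1:]]
#   ['&Yes, apply', '&No, skip', '&? (display help)']
#
# The integer returned by promptchoice() (0 for 'y', 1 for 'n', and so on) is
# what the r == 0, r == 1, ... comparisons in filterpatch() below switch on.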
1057 def filterpatch(ui, headers, operation=None):
1057 def filterpatch(ui, headers, operation=None):
1058 """Interactively filter patch chunks into applied-only chunks"""
1058 """Interactively filter patch chunks into applied-only chunks"""
1059 messages = getmessages()
1059 messages = getmessages()
1060
1060
1061 if operation is None:
1061 if operation is None:
1062 operation = 'record'
1062 operation = 'record'
1063
1063
1064 def prompt(skipfile, skipall, query, chunk):
1064 def prompt(skipfile, skipall, query, chunk):
1065 """prompt query, and process base inputs
1065 """prompt query, and process base inputs
1066
1066
1067 - y/n for the rest of the file (s/f)
1067 - y/n for the rest of the file (s/f)
1068 - y/n for the rest (d/a)
1068 - y/n for the rest (d/a)
1069 - ? (help)
1069 - ? (help)
1070 - q (quit)
1070 - q (quit)
1071
1071
1072 Return True/False and possibly updated skipfile and skipall.
1072 Return True/False and possibly updated skipfile and skipall.
1073 """
1073 """
1074 newpatches = None
1074 newpatches = None
1075 if skipall is not None:
1075 if skipall is not None:
1076 return skipall, skipfile, skipall, newpatches
1076 return skipall, skipfile, skipall, newpatches
1077 if skipfile is not None:
1077 if skipfile is not None:
1078 return skipfile, skipfile, skipall, newpatches
1078 return skipfile, skipfile, skipall, newpatches
1079 while True:
1079 while True:
1080 resps = messages['help'][operation]
1080 resps = messages['help'][operation]
1081 r = ui.promptchoice("%s %s" % (query, resps))
1081 r = ui.promptchoice("%s %s" % (query, resps))
1082 ui.write("\n")
1082 ui.write("\n")
1083 if r == 8: # ?
1083 if r == 8: # ?
1084 for c, t in ui.extractchoices(resps)[1]:
1084 for c, t in ui.extractchoices(resps)[1]:
1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1086 continue
1086 continue
1087 elif r == 0: # yes
1087 elif r == 0: # yes
1088 ret = True
1088 ret = True
1089 elif r == 1: # no
1089 elif r == 1: # no
1090 ret = False
1090 ret = False
1091 elif r == 2: # Edit patch
1091 elif r == 2: # Edit patch
1092 if chunk is None:
1092 if chunk is None:
1093 ui.write(_('cannot edit patch for whole file'))
1093 ui.write(_('cannot edit patch for whole file'))
1094 ui.write("\n")
1094 ui.write("\n")
1095 continue
1095 continue
1096 if chunk.header.binary():
1096 if chunk.header.binary():
1097 ui.write(_('cannot edit patch for binary file'))
1097 ui.write(_('cannot edit patch for binary file'))
1098 ui.write("\n")
1098 ui.write("\n")
1099 continue
1099 continue
1100 # Patch comment based on the Git one (based on comment at end of
1100 # Patch comment based on the Git one (based on comment at end of
1101 # https://mercurial-scm.org/wiki/RecordExtension)
1101 # https://mercurial-scm.org/wiki/RecordExtension)
1102 phelp = '---' + _("""
1102 phelp = '---' + _("""
1103 To remove '-' lines, make them ' ' lines (context).
1103 To remove '-' lines, make them ' ' lines (context).
1104 To remove '+' lines, delete them.
1104 To remove '+' lines, delete them.
1105 Lines starting with # will be removed from the patch.
1105 Lines starting with # will be removed from the patch.
1106
1106
1107 If the patch applies cleanly, the edited hunk will immediately be
1107 If the patch applies cleanly, the edited hunk will immediately be
1108 added to the record list. If it does not apply cleanly, a rejects
1108 added to the record list. If it does not apply cleanly, a rejects
1109 file will be generated: you can use that when you try again. If
1109 file will be generated: you can use that when you try again. If
1110 all lines of the hunk are removed, then the edit is aborted and
1110 all lines of the hunk are removed, then the edit is aborted and
1111 the hunk is left unchanged.
1111 the hunk is left unchanged.
1112 """)
1112 """)
1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1114 suffix=".diff")
1114 suffix=".diff")
1115 ncpatchfp = None
1115 ncpatchfp = None
1116 try:
1116 try:
1117 # Write the initial patch
1117 # Write the initial patch
1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1119 chunk.header.write(f)
1119 chunk.header.write(f)
1120 chunk.write(f)
1120 chunk.write(f)
1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1122 f.close()
1122 f.close()
1123 # Start the editor and wait for it to complete
1123 # Start the editor and wait for it to complete
1124 editor = ui.geteditor()
1124 editor = ui.geteditor()
1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1126 environ={'HGUSER': ui.username()},
1126 environ={'HGUSER': ui.username()},
1127 blockedtag='filterpatch')
1127 blockedtag='filterpatch')
1128 if ret != 0:
1128 if ret != 0:
1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1130 continue
1130 continue
1131 # Remove comment lines
1131 # Remove comment lines
1132 patchfp = open(patchfn, r'rb')
1132 patchfp = open(patchfn, r'rb')
1133 ncpatchfp = stringio()
1133 ncpatchfp = stringio()
1134 for line in util.iterfile(patchfp):
1134 for line in util.iterfile(patchfp):
1135 line = util.fromnativeeol(line)
1135 line = util.fromnativeeol(line)
1136 if not line.startswith('#'):
1136 if not line.startswith('#'):
1137 ncpatchfp.write(line)
1137 ncpatchfp.write(line)
1138 patchfp.close()
1138 patchfp.close()
1139 ncpatchfp.seek(0)
1139 ncpatchfp.seek(0)
1140 newpatches = parsepatch(ncpatchfp)
1140 newpatches = parsepatch(ncpatchfp)
1141 finally:
1141 finally:
1142 os.unlink(patchfn)
1142 os.unlink(patchfn)
1143 del ncpatchfp
1143 del ncpatchfp
1144 # Signal that the chunk shouldn't be applied as-is, but
1144 # Signal that the chunk shouldn't be applied as-is, but
1145 # provide the new patch to be used instead.
1145 # provide the new patch to be used instead.
1146 ret = False
1146 ret = False
1147 elif r == 3: # Skip
1147 elif r == 3: # Skip
1148 ret = skipfile = False
1148 ret = skipfile = False
1149 elif r == 4: # file (Record remaining)
1149 elif r == 4: # file (Record remaining)
1150 ret = skipfile = True
1150 ret = skipfile = True
1151 elif r == 5: # done, skip remaining
1151 elif r == 5: # done, skip remaining
1152 ret = skipall = False
1152 ret = skipall = False
1153 elif r == 6: # all
1153 elif r == 6: # all
1154 ret = skipall = True
1154 ret = skipall = True
1155 elif r == 7: # quit
1155 elif r == 7: # quit
1156 raise error.Abort(_('user quit'))
1156 raise error.Abort(_('user quit'))
1157 return ret, skipfile, skipall, newpatches
1157 return ret, skipfile, skipall, newpatches
1158
1158
1159 seen = set()
1159 seen = set()
1160 applied = {} # 'filename' -> [] of chunks
1160 applied = {} # 'filename' -> [] of chunks
1161 skipfile, skipall = None, None
1161 skipfile, skipall = None, None
1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1163 for h in headers:
1163 for h in headers:
1164 pos += len(h.hunks)
1164 pos += len(h.hunks)
1165 skipfile = None
1165 skipfile = None
1166 fixoffset = 0
1166 fixoffset = 0
1167 hdr = ''.join(h.header)
1167 hdr = ''.join(h.header)
1168 if hdr in seen:
1168 if hdr in seen:
1169 continue
1169 continue
1170 seen.add(hdr)
1170 seen.add(hdr)
1171 if skipall is None:
1171 if skipall is None:
1172 h.pretty(ui)
1172 h.pretty(ui)
1173 msg = (_('examine changes to %s?') %
1173 msg = (_('examine changes to %s?') %
1174 _(' and ').join("'%s'" % f for f in h.files()))
1174 _(' and ').join("'%s'" % f for f in h.files()))
1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1176 if not r:
1176 if not r:
1177 continue
1177 continue
1178 applied[h.filename()] = [h]
1178 applied[h.filename()] = [h]
1179 if h.allhunks():
1179 if h.allhunks():
1180 applied[h.filename()] += h.hunks
1180 applied[h.filename()] += h.hunks
1181 continue
1181 continue
1182 for i, chunk in enumerate(h.hunks):
1182 for i, chunk in enumerate(h.hunks):
1183 if skipfile is None and skipall is None:
1183 if skipfile is None and skipall is None:
1184 chunk.pretty(ui)
1184 chunk.pretty(ui)
1185 if total == 1:
1185 if total == 1:
1186 msg = messages['single'][operation] % chunk.filename()
1186 msg = messages['single'][operation] % chunk.filename()
1187 else:
1187 else:
1188 idx = pos - len(h.hunks) + i
1188 idx = pos - len(h.hunks) + i
1189 msg = messages['multiple'][operation] % (idx, total,
1189 msg = messages['multiple'][operation] % (idx, total,
1190 chunk.filename())
1190 chunk.filename())
1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1192 skipall, msg, chunk)
1192 skipall, msg, chunk)
1193 if r:
1193 if r:
1194 if fixoffset:
1194 if fixoffset:
1195 chunk = copy.copy(chunk)
1195 chunk = copy.copy(chunk)
1196 chunk.toline += fixoffset
1196 chunk.toline += fixoffset
1197 applied[chunk.filename()].append(chunk)
1197 applied[chunk.filename()].append(chunk)
1198 elif newpatches is not None:
1198 elif newpatches is not None:
1199 for newpatch in newpatches:
1199 for newpatch in newpatches:
1200 for newhunk in newpatch.hunks:
1200 for newhunk in newpatch.hunks:
1201 if fixoffset:
1201 if fixoffset:
1202 newhunk.toline += fixoffset
1202 newhunk.toline += fixoffset
1203 applied[newhunk.filename()].append(newhunk)
1203 applied[newhunk.filename()].append(newhunk)
1204 else:
1204 else:
1205 fixoffset += chunk.removed - chunk.added
1205 fixoffset += chunk.removed - chunk.added
1206 return (sum([h for h in applied.itervalues()
1206 return (sum([h for h in applied.itervalues()
1207 if h[0].special() or len(h) > 1], []), {})
1207 if h[0].special() or len(h) > 1], []), {})
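# Editor's worked example of the fixoffset bookkeeping in filterpatch() above
# (illustrative only): when a hunk is skipped, the working file keeps its old
# lines for that region, so every later hunk of the same file lands at a
# different line than the complete patch expected. Skipping a hunk that
# removed 2 lines and added 1 leaves the file one line longer than assumed:
#
#   >>> removed, added = 2, 1
#   >>> fixoffset = removed - added
#   >>> fixoffset
#   1
#
# and each subsequently applied chunk gets chunk.toline += fixoffset so that
# its target position still matches the file actually being built.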
1208 class hunk(object):
1208 class hunk(object):
1209 def __init__(self, desc, num, lr, context):
1209 def __init__(self, desc, num, lr, context):
1210 self.number = num
1210 self.number = num
1211 self.desc = desc
1211 self.desc = desc
1212 self.hunk = [desc]
1212 self.hunk = [desc]
1213 self.a = []
1213 self.a = []
1214 self.b = []
1214 self.b = []
1215 self.starta = self.lena = None
1215 self.starta = self.lena = None
1216 self.startb = self.lenb = None
1216 self.startb = self.lenb = None
1217 if lr is not None:
1217 if lr is not None:
1218 if context:
1218 if context:
1219 self.read_context_hunk(lr)
1219 self.read_context_hunk(lr)
1220 else:
1220 else:
1221 self.read_unified_hunk(lr)
1221 self.read_unified_hunk(lr)
1222
1222
1223 def getnormalized(self):
1223 def getnormalized(self):
1224 """Return a copy with line endings normalized to LF."""
1224 """Return a copy with line endings normalized to LF."""
1225
1225
1226 def normalize(lines):
1226 def normalize(lines):
1227 nlines = []
1227 nlines = []
1228 for line in lines:
1228 for line in lines:
1229 if line.endswith('\r\n'):
1229 if line.endswith('\r\n'):
1230 line = line[:-2] + '\n'
1230 line = line[:-2] + '\n'
1231 nlines.append(line)
1231 nlines.append(line)
1232 return nlines
1232 return nlines
1233
1233
1234 # Dummy object, it is rebuilt manually
1234 # Dummy object, it is rebuilt manually
1235 nh = hunk(self.desc, self.number, None, None)
1235 nh = hunk(self.desc, self.number, None, None)
1236 nh.number = self.number
1236 nh.number = self.number
1237 nh.desc = self.desc
1237 nh.desc = self.desc
1238 nh.hunk = self.hunk
1238 nh.hunk = self.hunk
1239 nh.a = normalize(self.a)
1239 nh.a = normalize(self.a)
1240 nh.b = normalize(self.b)
1240 nh.b = normalize(self.b)
1241 nh.starta = self.starta
1241 nh.starta = self.starta
1242 nh.startb = self.startb
1242 nh.startb = self.startb
1243 nh.lena = self.lena
1243 nh.lena = self.lena
1244 nh.lenb = self.lenb
1244 nh.lenb = self.lenb
1245 return nh
1245 return nh
1246
1246
1247 def read_unified_hunk(self, lr):
1247 def read_unified_hunk(self, lr):
1248 m = unidesc.match(self.desc)
1248 m = unidesc.match(self.desc)
1249 if not m:
1249 if not m:
1250 raise PatchError(_("bad hunk #%d") % self.number)
1250 raise PatchError(_("bad hunk #%d") % self.number)
1251 self.starta, self.lena, self.startb, self.lenb = m.groups()
1251 self.starta, self.lena, self.startb, self.lenb = m.groups()
1252 if self.lena is None:
1252 if self.lena is None:
1253 self.lena = 1
1253 self.lena = 1
1254 else:
1254 else:
1255 self.lena = int(self.lena)
1255 self.lena = int(self.lena)
1256 if self.lenb is None:
1256 if self.lenb is None:
1257 self.lenb = 1
1257 self.lenb = 1
1258 else:
1258 else:
1259 self.lenb = int(self.lenb)
1259 self.lenb = int(self.lenb)
1260 self.starta = int(self.starta)
1260 self.starta = int(self.starta)
1261 self.startb = int(self.startb)
1261 self.startb = int(self.startb)
1262 try:
1262 try:
1263 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1263 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1264 self.a, self.b)
1264 self.a, self.b)
1265 except error.ParseError as e:
1265 except error.ParseError as e:
1266 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1266 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1267 # if we hit EOF before finishing out the hunk, the last line will
1267 # if we hit EOF before finishing out the hunk, the last line will
1268 # be zero length. Let's try to fix it up.
1268 # be zero length. Let's try to fix it up.
1269 while len(self.hunk[-1]) == 0:
1269 while len(self.hunk[-1]) == 0:
1270 del self.hunk[-1]
1270 del self.hunk[-1]
1271 del self.a[-1]
1271 del self.a[-1]
1272 del self.b[-1]
1272 del self.b[-1]
1273 self.lena -= 1
1273 self.lena -= 1
1274 self.lenb -= 1
1274 self.lenb -= 1
1275 self._fixnewline(lr)
1275 self._fixnewline(lr)
1276
1276
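# Editor's sketch of the header format parsed above. 'unidesc' is defined
# earlier in this module; the regex below is an assumed equivalent, shown only
# to illustrate why a missing length defaults to 1:
#
#   >>> import re
#   >>> unidesc_like = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
#   >>> unidesc_like.match('@@ -3,7 +3,8 @@ def f():').groups()
#   ('3', '7', '3', '8')
#   >>> unidesc_like.match('@@ -1 +1 @@').groups()
#   ('1', None, '1', None)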
1277 def read_context_hunk(self, lr):
1277 def read_context_hunk(self, lr):
1278 self.desc = lr.readline()
1278 self.desc = lr.readline()
1279 m = contextdesc.match(self.desc)
1279 m = contextdesc.match(self.desc)
1280 if not m:
1280 if not m:
1281 raise PatchError(_("bad hunk #%d") % self.number)
1281 raise PatchError(_("bad hunk #%d") % self.number)
1282 self.starta, aend = m.groups()
1282 self.starta, aend = m.groups()
1283 self.starta = int(self.starta)
1283 self.starta = int(self.starta)
1284 if aend is None:
1284 if aend is None:
1285 aend = self.starta
1285 aend = self.starta
1286 self.lena = int(aend) - self.starta
1286 self.lena = int(aend) - self.starta
1287 if self.starta:
1287 if self.starta:
1288 self.lena += 1
1288 self.lena += 1
1289 for x in pycompat.xrange(self.lena):
1289 for x in pycompat.xrange(self.lena):
1290 l = lr.readline()
1290 l = lr.readline()
1291 if l.startswith('---'):
1291 if l.startswith('---'):
1292 # lines addition, old block is empty
1292 # lines addition, old block is empty
1293 lr.push(l)
1293 lr.push(l)
1294 break
1294 break
1295 s = l[2:]
1295 s = l[2:]
1296 if l.startswith('- ') or l.startswith('! '):
1296 if l.startswith('- ') or l.startswith('! '):
1297 u = '-' + s
1297 u = '-' + s
1298 elif l.startswith(' '):
1298 elif l.startswith(' '):
1299 u = ' ' + s
1299 u = ' ' + s
1300 else:
1300 else:
1301 raise PatchError(_("bad hunk #%d old text line %d") %
1301 raise PatchError(_("bad hunk #%d old text line %d") %
1302 (self.number, x))
1302 (self.number, x))
1303 self.a.append(u)
1303 self.a.append(u)
1304 self.hunk.append(u)
1304 self.hunk.append(u)
1305
1305
1306 l = lr.readline()
1306 l = lr.readline()
1307 if l.startswith('\ '):
1307 if l.startswith('\ '):
1308 s = self.a[-1][:-1]
1308 s = self.a[-1][:-1]
1309 self.a[-1] = s
1309 self.a[-1] = s
1310 self.hunk[-1] = s
1310 self.hunk[-1] = s
1311 l = lr.readline()
1311 l = lr.readline()
1312 m = contextdesc.match(l)
1312 m = contextdesc.match(l)
1313 if not m:
1313 if not m:
1314 raise PatchError(_("bad hunk #%d") % self.number)
1314 raise PatchError(_("bad hunk #%d") % self.number)
1315 self.startb, bend = m.groups()
1315 self.startb, bend = m.groups()
1316 self.startb = int(self.startb)
1316 self.startb = int(self.startb)
1317 if bend is None:
1317 if bend is None:
1318 bend = self.startb
1318 bend = self.startb
1319 self.lenb = int(bend) - self.startb
1319 self.lenb = int(bend) - self.startb
1320 if self.startb:
1320 if self.startb:
1321 self.lenb += 1
1321 self.lenb += 1
1322 hunki = 1
1322 hunki = 1
1323 for x in pycompat.xrange(self.lenb):
1323 for x in pycompat.xrange(self.lenb):
1324 l = lr.readline()
1324 l = lr.readline()
1325 if l.startswith('\ '):
1325 if l.startswith('\ '):
1326 # XXX: the only way to hit this is with an invalid line range.
1326 # XXX: the only way to hit this is with an invalid line range.
1327 # The no-eol marker is not counted in the line range, but I
1327 # The no-eol marker is not counted in the line range, but I
1328 # guess there are diff(1) implementations out there which behave differently.
1328 # guess there are diff(1) implementations out there which behave differently.
1329 s = self.b[-1][:-1]
1329 s = self.b[-1][:-1]
1330 self.b[-1] = s
1330 self.b[-1] = s
1331 self.hunk[hunki - 1] = s
1331 self.hunk[hunki - 1] = s
1332 continue
1332 continue
1333 if not l:
1333 if not l:
1334 # line deletions, new block is empty and we hit EOF
1334 # line deletions, new block is empty and we hit EOF
1335 lr.push(l)
1335 lr.push(l)
1336 break
1336 break
1337 s = l[2:]
1337 s = l[2:]
1338 if l.startswith('+ ') or l.startswith('! '):
1338 if l.startswith('+ ') or l.startswith('! '):
1339 u = '+' + s
1339 u = '+' + s
1340 elif l.startswith(' '):
1340 elif l.startswith(' '):
1341 u = ' ' + s
1341 u = ' ' + s
1342 elif len(self.b) == 0:
1342 elif len(self.b) == 0:
1343 # line deletions, new block is empty
1343 # line deletions, new block is empty
1344 lr.push(l)
1344 lr.push(l)
1345 break
1345 break
1346 else:
1346 else:
1347 raise PatchError(_("bad hunk #%d old text line %d") %
1347 raise PatchError(_("bad hunk #%d old text line %d") %
1348 (self.number, x))
1348 (self.number, x))
1349 self.b.append(s)
1349 self.b.append(s)
1350 while True:
1350 while True:
1351 if hunki >= len(self.hunk):
1351 if hunki >= len(self.hunk):
1352 h = ""
1352 h = ""
1353 else:
1353 else:
1354 h = self.hunk[hunki]
1354 h = self.hunk[hunki]
1355 hunki += 1
1355 hunki += 1
1356 if h == u:
1356 if h == u:
1357 break
1357 break
1358 elif h.startswith('-'):
1358 elif h.startswith('-'):
1359 continue
1359 continue
1360 else:
1360 else:
1361 self.hunk.insert(hunki - 1, u)
1361 self.hunk.insert(hunki - 1, u)
1362 break
1362 break
1363
1363
1364 if not self.a:
1364 if not self.a:
1365 # this happens when lines were only added to the hunk
1365 # this happens when lines were only added to the hunk
1366 for x in self.hunk:
1366 for x in self.hunk:
1367 if x.startswith('-') or x.startswith(' '):
1367 if x.startswith('-') or x.startswith(' '):
1368 self.a.append(x)
1368 self.a.append(x)
1369 if not self.b:
1369 if not self.b:
1370 # this happens when lines were only deleted from the hunk
1370 # this happens when lines were only deleted from the hunk
1371 for x in self.hunk:
1371 for x in self.hunk:
1372 if x.startswith('+') or x.startswith(' '):
1372 if x.startswith('+') or x.startswith(' '):
1373 self.b.append(x[1:])
1373 self.b.append(x[1:])
1374 # @@ -start,len +start,len @@
1374 # @@ -start,len +start,len @@
1375 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1375 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1376 self.startb, self.lenb)
1376 self.startb, self.lenb)
1377 self.hunk[0] = self.desc
1377 self.hunk[0] = self.desc
1378 self._fixnewline(lr)
1378 self._fixnewline(lr)
1379
1379
1380 def _fixnewline(self, lr):
1380 def _fixnewline(self, lr):
1381 l = lr.readline()
1381 l = lr.readline()
1382 if l.startswith('\ '):
1382 if l.startswith('\ '):
1383 diffhelper.fixnewline(self.hunk, self.a, self.b)
1383 diffhelper.fixnewline(self.hunk, self.a, self.b)
1384 else:
1384 else:
1385 lr.push(l)
1385 lr.push(l)
1386
1386
1387 def complete(self):
1387 def complete(self):
1388 return len(self.a) == self.lena and len(self.b) == self.lenb
1388 return len(self.a) == self.lena and len(self.b) == self.lenb
1389
1389
1390 def _fuzzit(self, old, new, fuzz, toponly):
1390 def _fuzzit(self, old, new, fuzz, toponly):
1391 # this removes context lines from the top and bottom of the 'old' and
1391 # this removes context lines from the top and bottom of the 'old' and
1392 # 'new' lists. It checks the hunk to make sure only context lines are
1392 # 'new' lists. It checks the hunk to make sure only context lines are
1393 # removed, and then returns new, shortened lists of lines.
1393 # removed, and then returns new, shortened lists of lines.
1394 fuzz = min(fuzz, len(old))
1394 fuzz = min(fuzz, len(old))
1395 if fuzz:
1395 if fuzz:
1396 top = 0
1396 top = 0
1397 bot = 0
1397 bot = 0
1398 hlen = len(self.hunk)
1398 hlen = len(self.hunk)
1399 for x in pycompat.xrange(hlen - 1):
1399 for x in pycompat.xrange(hlen - 1):
1400 # the hunk starts with the @@ line, so use x+1
1400 # the hunk starts with the @@ line, so use x+1
1401 if self.hunk[x + 1].startswith(' '):
1401 if self.hunk[x + 1].startswith(' '):
1402 top += 1
1402 top += 1
1403 else:
1403 else:
1404 break
1404 break
1405 if not toponly:
1405 if not toponly:
1406 for x in pycompat.xrange(hlen - 1):
1406 for x in pycompat.xrange(hlen - 1):
1407 if self.hunk[hlen - bot - 1].startswith(' '):
1407 if self.hunk[hlen - bot - 1].startswith(' '):
1408 bot += 1
1408 bot += 1
1409 else:
1409 else:
1410 break
1410 break
1411
1411
1412 bot = min(fuzz, bot)
1412 bot = min(fuzz, bot)
1413 top = min(fuzz, top)
1413 top = min(fuzz, top)
1414 return old[top:len(old) - bot], new[top:len(new) - bot], top
1414 return old[top:len(old) - bot], new[top:len(new) - bot], top
1415 return old, new, 0
1415 return old, new, 0
1416
1416
1417 def fuzzit(self, fuzz, toponly):
1417 def fuzzit(self, fuzz, toponly):
1418 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1418 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1419 oldstart = self.starta + top
1419 oldstart = self.starta + top
1420 newstart = self.startb + top
1420 newstart = self.startb + top
1421 # zero length hunk ranges already have their start decremented
1421 # zero length hunk ranges already have their start decremented
1422 if self.lena and oldstart > 0:
1422 if self.lena and oldstart > 0:
1423 oldstart -= 1
1423 oldstart -= 1
1424 if self.lenb and newstart > 0:
1424 if self.lenb and newstart > 0:
1425 newstart -= 1
1425 newstart -= 1
1426 return old, oldstart, new, newstart
1426 return old, oldstart, new, newstart
1427
1427
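# Editor's note, a small worked example of the fuzz trimming above
# (illustrative only): with fuzz=2, a hunk body that starts with one context
# line and ends with three can lose at most that one line from the top and
# min(fuzz, 3) = 2 from the bottom:
#
#   >>> fuzz, leading_ctx, trailing_ctx = 2, 1, 3
#   >>> min(fuzz, leading_ctx), min(fuzz, trailing_ctx)
#   (1, 2)
#
# fuzzit() then shifts oldstart/newstart forward by the number of lines
# trimmed from the top, minus the usual off-by-one for non-empty ranges.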
1428 class binhunk(object):
1428 class binhunk(object):
1429 'A binary patch file.'
1429 'A binary patch file.'
1430 def __init__(self, lr, fname):
1430 def __init__(self, lr, fname):
1431 self.text = None
1431 self.text = None
1432 self.delta = False
1432 self.delta = False
1433 self.hunk = ['GIT binary patch\n']
1433 self.hunk = ['GIT binary patch\n']
1434 self._fname = fname
1434 self._fname = fname
1435 self._read(lr)
1435 self._read(lr)
1436
1436
1437 def complete(self):
1437 def complete(self):
1438 return self.text is not None
1438 return self.text is not None
1439
1439
1440 def new(self, lines):
1440 def new(self, lines):
1441 if self.delta:
1441 if self.delta:
1442 return [applybindelta(self.text, ''.join(lines))]
1442 return [applybindelta(self.text, ''.join(lines))]
1443 return [self.text]
1443 return [self.text]
1444
1444
1445 def _read(self, lr):
1445 def _read(self, lr):
1446 def getline(lr, hunk):
1446 def getline(lr, hunk):
1447 l = lr.readline()
1447 l = lr.readline()
1448 hunk.append(l)
1448 hunk.append(l)
1449 return l.rstrip('\r\n')
1449 return l.rstrip('\r\n')
1450
1450
1451 while True:
1451 while True:
1452 line = getline(lr, self.hunk)
1452 line = getline(lr, self.hunk)
1453 if not line:
1453 if not line:
1454 raise PatchError(_('could not extract "%s" binary data')
1454 raise PatchError(_('could not extract "%s" binary data')
1455 % self._fname)
1455 % self._fname)
1456 if line.startswith('literal '):
1456 if line.startswith('literal '):
1457 size = int(line[8:].rstrip())
1457 size = int(line[8:].rstrip())
1458 break
1458 break
1459 if line.startswith('delta '):
1459 if line.startswith('delta '):
1460 size = int(line[6:].rstrip())
1460 size = int(line[6:].rstrip())
1461 self.delta = True
1461 self.delta = True
1462 break
1462 break
1463 dec = []
1463 dec = []
1464 line = getline(lr, self.hunk)
1464 line = getline(lr, self.hunk)
1465 while len(line) > 1:
1465 while len(line) > 1:
1466 l = line[0:1]
1466 l = line[0:1]
1467 if l <= 'Z' and l >= 'A':
1467 if l <= 'Z' and l >= 'A':
1468 l = ord(l) - ord('A') + 1
1468 l = ord(l) - ord('A') + 1
1469 else:
1469 else:
1470 l = ord(l) - ord('a') + 27
1470 l = ord(l) - ord('a') + 27
1471 try:
1471 try:
1472 dec.append(util.b85decode(line[1:])[:l])
1472 dec.append(util.b85decode(line[1:])[:l])
1473 except ValueError as e:
1473 except ValueError as e:
1474 raise PatchError(_('could not decode "%s" binary patch: %s')
1474 raise PatchError(_('could not decode "%s" binary patch: %s')
1475 % (self._fname, stringutil.forcebytestr(e)))
1475 % (self._fname, stringutil.forcebytestr(e)))
1476 line = getline(lr, self.hunk)
1476 line = getline(lr, self.hunk)
1477 text = zlib.decompress(''.join(dec))
1477 text = zlib.decompress(''.join(dec))
1478 if len(text) != size:
1478 if len(text) != size:
1479 raise PatchError(_('"%s" length is %d bytes, should be %d')
1479 raise PatchError(_('"%s" length is %d bytes, should be %d')
1480 % (self._fname, len(text), size))
1480 % (self._fname, len(text), size))
1481 self.text = text
1481 self.text = text
1482
1482
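# Editor's sketch of the 'literal' encoding consumed by binhunk._read() above.
# It is standalone and uses the stdlib base85 codec (the same alphabet as
# git-style binary diffs; util.b85decode plays that role in this module),
# Python 3 syntax for brevity. Each line starts with one character giving the
# decoded byte count ('A'-'Z' for 1..26, 'a'-'z' for 27..52), then the base85
# payload; the concatenated bytes are zlib-decompressed to exactly 'size'
# bytes:
#
#   >>> import base64, zlib
#   >>> payload = zlib.compress(b'hello world\n')
#   >>> line = bytes([ord('A') + len(payload) - 1]) + base64.b85encode(payload, pad=True)
#   >>> n = line[0] - ord('A') + 1
#   >>> zlib.decompress(base64.b85decode(line[1:])[:n])
#   b'hello world\n'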
1483 def parsefilename(str):
1483 def parsefilename(str):
1484 # --- filename \t|space stuff
1484 # --- filename \t|space stuff
1485 s = str[4:].rstrip('\r\n')
1485 s = str[4:].rstrip('\r\n')
1486 i = s.find('\t')
1486 i = s.find('\t')
1487 if i < 0:
1487 if i < 0:
1488 i = s.find(' ')
1488 i = s.find(' ')
1489 if i < 0:
1489 if i < 0:
1490 return s
1490 return s
1491 return s[:i]
1491 return s[:i]
1492
1492
1493 def reversehunks(hunks):
1493 def reversehunks(hunks):
1494 '''reverse the signs in the hunks given as argument
1494 '''reverse the signs in the hunks given as argument
1495
1495
1496 This function operates on hunks coming out of patch.filterpatch, that is
1496 This function operates on hunks coming out of patch.filterpatch, that is
1497 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1497 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1498
1498
1499 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1499 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1500 ... --- a/folder1/g
1500 ... --- a/folder1/g
1501 ... +++ b/folder1/g
1501 ... +++ b/folder1/g
1502 ... @@ -1,7 +1,7 @@
1502 ... @@ -1,7 +1,7 @@
1503 ... +firstline
1503 ... +firstline
1504 ... c
1504 ... c
1505 ... 1
1505 ... 1
1506 ... 2
1506 ... 2
1507 ... + 3
1507 ... + 3
1508 ... -4
1508 ... -4
1509 ... 5
1509 ... 5
1510 ... d
1510 ... d
1511 ... +lastline"""
1511 ... +lastline"""
1512 >>> hunks = parsepatch([rawpatch])
1512 >>> hunks = parsepatch([rawpatch])
1513 >>> hunkscomingfromfilterpatch = []
1513 >>> hunkscomingfromfilterpatch = []
1514 >>> for h in hunks:
1514 >>> for h in hunks:
1515 ... hunkscomingfromfilterpatch.append(h)
1515 ... hunkscomingfromfilterpatch.append(h)
1516 ... hunkscomingfromfilterpatch.extend(h.hunks)
1516 ... hunkscomingfromfilterpatch.extend(h.hunks)
1517
1517
1518 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1518 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1519 >>> from . import util
1519 >>> from . import util
1520 >>> fp = util.stringio()
1520 >>> fp = util.stringio()
1521 >>> for c in reversedhunks:
1521 >>> for c in reversedhunks:
1522 ... c.write(fp)
1522 ... c.write(fp)
1523 >>> fp.seek(0) or None
1523 >>> fp.seek(0) or None
1524 >>> reversedpatch = fp.read()
1524 >>> reversedpatch = fp.read()
1525 >>> print(pycompat.sysstr(reversedpatch))
1525 >>> print(pycompat.sysstr(reversedpatch))
1526 diff --git a/folder1/g b/folder1/g
1526 diff --git a/folder1/g b/folder1/g
1527 --- a/folder1/g
1527 --- a/folder1/g
1528 +++ b/folder1/g
1528 +++ b/folder1/g
1529 @@ -1,4 +1,3 @@
1529 @@ -1,4 +1,3 @@
1530 -firstline
1530 -firstline
1531 c
1531 c
1532 1
1532 1
1533 2
1533 2
1534 @@ -2,6 +1,6 @@
1534 @@ -2,6 +1,6 @@
1535 c
1535 c
1536 1
1536 1
1537 2
1537 2
1538 - 3
1538 - 3
1539 +4
1539 +4
1540 5
1540 5
1541 d
1541 d
1542 @@ -6,3 +5,2 @@
1542 @@ -6,3 +5,2 @@
1543 5
1543 5
1544 d
1544 d
1545 -lastline
1545 -lastline
1546
1546
1547 '''
1547 '''
1548
1548
1549 newhunks = []
1549 newhunks = []
1550 for c in hunks:
1550 for c in hunks:
1551 if util.safehasattr(c, 'reversehunk'):
1551 if util.safehasattr(c, 'reversehunk'):
1552 c = c.reversehunk()
1552 c = c.reversehunk()
1553 newhunks.append(c)
1553 newhunks.append(c)
1554 return newhunks
1554 return newhunks
1555
1555
1556 def parsepatch(originalchunks, maxcontext=None):
1556 def parsepatch(originalchunks, maxcontext=None):
1557 """patch -> [] of headers -> [] of hunks
1557 """patch -> [] of headers -> [] of hunks
1558
1558
1559 If maxcontext is not None, trim context lines if necessary.
1559 If maxcontext is not None, trim context lines if necessary.
1560
1560
1561 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1561 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1562 ... --- a/folder1/g
1562 ... --- a/folder1/g
1563 ... +++ b/folder1/g
1563 ... +++ b/folder1/g
1564 ... @@ -1,8 +1,10 @@
1564 ... @@ -1,8 +1,10 @@
1565 ... 1
1565 ... 1
1566 ... 2
1566 ... 2
1567 ... -3
1567 ... -3
1568 ... 4
1568 ... 4
1569 ... 5
1569 ... 5
1570 ... 6
1570 ... 6
1571 ... +6.1
1571 ... +6.1
1572 ... +6.2
1572 ... +6.2
1573 ... 7
1573 ... 7
1574 ... 8
1574 ... 8
1575 ... +9'''
1575 ... +9'''
1576 >>> out = util.stringio()
1576 >>> out = util.stringio()
1577 >>> headers = parsepatch([rawpatch], maxcontext=1)
1577 >>> headers = parsepatch([rawpatch], maxcontext=1)
1578 >>> for header in headers:
1578 >>> for header in headers:
1579 ... header.write(out)
1579 ... header.write(out)
1580 ... for hunk in header.hunks:
1580 ... for hunk in header.hunks:
1581 ... hunk.write(out)
1581 ... hunk.write(out)
1582 >>> print(pycompat.sysstr(out.getvalue()))
1582 >>> print(pycompat.sysstr(out.getvalue()))
1583 diff --git a/folder1/g b/folder1/g
1583 diff --git a/folder1/g b/folder1/g
1584 --- a/folder1/g
1584 --- a/folder1/g
1585 +++ b/folder1/g
1585 +++ b/folder1/g
1586 @@ -2,3 +2,2 @@
1586 @@ -2,3 +2,2 @@
1587 2
1587 2
1588 -3
1588 -3
1589 4
1589 4
1590 @@ -6,2 +5,4 @@
1590 @@ -6,2 +5,4 @@
1591 6
1591 6
1592 +6.1
1592 +6.1
1593 +6.2
1593 +6.2
1594 7
1594 7
1595 @@ -8,1 +9,2 @@
1595 @@ -8,1 +9,2 @@
1596 8
1596 8
1597 +9
1597 +9
1598 """
1598 """
1599 class parser(object):
1599 class parser(object):
1600 """patch parsing state machine"""
1600 """patch parsing state machine"""
1601 def __init__(self):
1601 def __init__(self):
1602 self.fromline = 0
1602 self.fromline = 0
1603 self.toline = 0
1603 self.toline = 0
1604 self.proc = ''
1604 self.proc = ''
1605 self.header = None
1605 self.header = None
1606 self.context = []
1606 self.context = []
1607 self.before = []
1607 self.before = []
1608 self.hunk = []
1608 self.hunk = []
1609 self.headers = []
1609 self.headers = []
1610
1610
1611 def addrange(self, limits):
1611 def addrange(self, limits):
1612 fromstart, fromend, tostart, toend, proc = limits
1612 fromstart, fromend, tostart, toend, proc = limits
1613 self.fromline = int(fromstart)
1613 self.fromline = int(fromstart)
1614 self.toline = int(tostart)
1614 self.toline = int(tostart)
1615 self.proc = proc
1615 self.proc = proc
1616
1616
1617 def addcontext(self, context):
1617 def addcontext(self, context):
1618 if self.hunk:
1618 if self.hunk:
1619 h = recordhunk(self.header, self.fromline, self.toline,
1619 h = recordhunk(self.header, self.fromline, self.toline,
1620 self.proc, self.before, self.hunk, context, maxcontext)
1620 self.proc, self.before, self.hunk, context, maxcontext)
1621 self.header.hunks.append(h)
1621 self.header.hunks.append(h)
1622 self.fromline += len(self.before) + h.removed
1622 self.fromline += len(self.before) + h.removed
1623 self.toline += len(self.before) + h.added
1623 self.toline += len(self.before) + h.added
1624 self.before = []
1624 self.before = []
1625 self.hunk = []
1625 self.hunk = []
1626 self.context = context
1626 self.context = context
1627
1627
1628 def addhunk(self, hunk):
1628 def addhunk(self, hunk):
1629 if self.context:
1629 if self.context:
1630 self.before = self.context
1630 self.before = self.context
1631 self.context = []
1631 self.context = []
1632 self.hunk = hunk
1632 self.hunk = hunk
1633
1633
1634 def newfile(self, hdr):
1634 def newfile(self, hdr):
1635 self.addcontext([])
1635 self.addcontext([])
1636 h = header(hdr)
1636 h = header(hdr)
1637 self.headers.append(h)
1637 self.headers.append(h)
1638 self.header = h
1638 self.header = h
1639
1639
1640 def addother(self, line):
1640 def addother(self, line):
1641 pass # 'other' lines are ignored
1641 pass # 'other' lines are ignored
1642
1642
1643 def finished(self):
1643 def finished(self):
1644 self.addcontext([])
1644 self.addcontext([])
1645 return self.headers
1645 return self.headers
1646
1646
1647 transitions = {
1647 transitions = {
1648 'file': {'context': addcontext,
1648 'file': {'context': addcontext,
1649 'file': newfile,
1649 'file': newfile,
1650 'hunk': addhunk,
1650 'hunk': addhunk,
1651 'range': addrange},
1651 'range': addrange},
1652 'context': {'file': newfile,
1652 'context': {'file': newfile,
1653 'hunk': addhunk,
1653 'hunk': addhunk,
1654 'range': addrange,
1654 'range': addrange,
1655 'other': addother},
1655 'other': addother},
1656 'hunk': {'context': addcontext,
1656 'hunk': {'context': addcontext,
1657 'file': newfile,
1657 'file': newfile,
1658 'range': addrange},
1658 'range': addrange},
1659 'range': {'context': addcontext,
1659 'range': {'context': addcontext,
1660 'hunk': addhunk},
1660 'hunk': addhunk},
1661 'other': {'other': addother},
1661 'other': {'other': addother},
1662 }
1662 }
1663
1663
1664 p = parser()
1664 p = parser()
1665 fp = stringio()
1665 fp = stringio()
1666 fp.write(''.join(originalchunks))
1666 fp.write(''.join(originalchunks))
1667 fp.seek(0)
1667 fp.seek(0)
1668
1668
1669 state = 'context'
1669 state = 'context'
1670 for newstate, data in scanpatch(fp):
1670 for newstate, data in scanpatch(fp):
1671 try:
1671 try:
1672 p.transitions[state][newstate](p, data)
1672 p.transitions[state][newstate](p, data)
1673 except KeyError:
1673 except KeyError:
1674 raise PatchError('unhandled transition: %s -> %s' %
1674 raise PatchError('unhandled transition: %s -> %s' %
1675 (state, newstate))
1675 (state, newstate))
1676 state = newstate
1676 state = newstate
1677 del fp
1677 del fp
1678 return p.finished()
1678 return p.finished()
1679
1679
1680 def pathtransform(path, strip, prefix):
1680 def pathtransform(path, strip, prefix):
1681 '''turn a path from a patch into a path suitable for the repository
1681 '''turn a path from a patch into a path suitable for the repository
1682
1682
1683 prefix, if not empty, is expected to be normalized with a / at the end.
1683 prefix, if not empty, is expected to be normalized with a / at the end.
1684
1684
1685 Returns (stripped components, path in repository).
1685 Returns (stripped components, path in repository).
1686
1686
1687 >>> pathtransform(b'a/b/c', 0, b'')
1687 >>> pathtransform(b'a/b/c', 0, b'')
1688 ('', 'a/b/c')
1688 ('', 'a/b/c')
1689 >>> pathtransform(b' a/b/c ', 0, b'')
1689 >>> pathtransform(b' a/b/c ', 0, b'')
1690 ('', ' a/b/c')
1690 ('', ' a/b/c')
1691 >>> pathtransform(b' a/b/c ', 2, b'')
1691 >>> pathtransform(b' a/b/c ', 2, b'')
1692 ('a/b/', 'c')
1692 ('a/b/', 'c')
1693 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1693 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1694 ('', 'd/e/a/b/c')
1694 ('', 'd/e/a/b/c')
1695 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1695 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1696 ('a//b/', 'd/e/c')
1696 ('a//b/', 'd/e/c')
1697 >>> pathtransform(b'a/b/c', 3, b'')
1697 >>> pathtransform(b'a/b/c', 3, b'')
1698 Traceback (most recent call last):
1698 Traceback (most recent call last):
1699 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1699 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1700 '''
1700 '''
1701 pathlen = len(path)
1701 pathlen = len(path)
1702 i = 0
1702 i = 0
1703 if strip == 0:
1703 if strip == 0:
1704 return '', prefix + path.rstrip()
1704 return '', prefix + path.rstrip()
1705 count = strip
1705 count = strip
1706 while count > 0:
1706 while count > 0:
1707 i = path.find('/', i)
1707 i = path.find('/', i)
1708 if i == -1:
1708 if i == -1:
1709 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1709 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1710 (count, strip, path))
1710 (count, strip, path))
1711 i += 1
1711 i += 1
1712 # consume '//' in the path
1712 # consume '//' in the path
1713 while i < pathlen - 1 and path[i:i + 1] == '/':
1713 while i < pathlen - 1 and path[i:i + 1] == '/':
1714 i += 1
1714 i += 1
1715 count -= 1
1715 count -= 1
1716 return path[:i].lstrip(), prefix + path[i:].rstrip()
1716 return path[:i].lstrip(), prefix + path[i:].rstrip()
1717
1717
1718 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1718 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1719 nulla = afile_orig == "/dev/null"
1719 nulla = afile_orig == "/dev/null"
1720 nullb = bfile_orig == "/dev/null"
1720 nullb = bfile_orig == "/dev/null"
1721 create = nulla and hunk.starta == 0 and hunk.lena == 0
1721 create = nulla and hunk.starta == 0 and hunk.lena == 0
1722 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1722 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1723 abase, afile = pathtransform(afile_orig, strip, prefix)
1723 abase, afile = pathtransform(afile_orig, strip, prefix)
1724 gooda = not nulla and backend.exists(afile)
1724 gooda = not nulla and backend.exists(afile)
1725 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1725 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1726 if afile == bfile:
1726 if afile == bfile:
1727 goodb = gooda
1727 goodb = gooda
1728 else:
1728 else:
1729 goodb = not nullb and backend.exists(bfile)
1729 goodb = not nullb and backend.exists(bfile)
1730 missing = not goodb and not gooda and not create
1730 missing = not goodb and not gooda and not create
1731
1731
1732 # some diff programs apparently produce patches where the afile is
1732 # some diff programs apparently produce patches where the afile is
1733 # not /dev/null, but afile starts with bfile
1733 # not /dev/null, but afile starts with bfile
1734 abasedir = afile[:afile.rfind('/') + 1]
1734 abasedir = afile[:afile.rfind('/') + 1]
1735 bbasedir = bfile[:bfile.rfind('/') + 1]
1735 bbasedir = bfile[:bfile.rfind('/') + 1]
1736 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1736 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1737 and hunk.starta == 0 and hunk.lena == 0):
1737 and hunk.starta == 0 and hunk.lena == 0):
1738 create = True
1738 create = True
1739 missing = False
1739 missing = False
1740
1740
1741 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1741 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1742 # diff is between a file and its backup. In this case, the original
1742 # diff is between a file and its backup. In this case, the original
1743 # file should be patched (see original mpatch code).
1743 # file should be patched (see original mpatch code).
1744 isbackup = (abase == bbase and bfile.startswith(afile))
1744 isbackup = (abase == bbase and bfile.startswith(afile))
1745 fname = None
1745 fname = None
1746 if not missing:
1746 if not missing:
1747 if gooda and goodb:
1747 if gooda and goodb:
1748 if isbackup:
1748 if isbackup:
1749 fname = afile
1749 fname = afile
1750 else:
1750 else:
1751 fname = bfile
1751 fname = bfile
1752 elif gooda:
1752 elif gooda:
1753 fname = afile
1753 fname = afile
1754
1754
1755 if not fname:
1755 if not fname:
1756 if not nullb:
1756 if not nullb:
1757 if isbackup:
1757 if isbackup:
1758 fname = afile
1758 fname = afile
1759 else:
1759 else:
1760 fname = bfile
1760 fname = bfile
1761 elif not nulla:
1761 elif not nulla:
1762 fname = afile
1762 fname = afile
1763 else:
1763 else:
1764 raise PatchError(_("undefined source and destination files"))
1764 raise PatchError(_("undefined source and destination files"))
1765
1765
1766 gp = patchmeta(fname)
1766 gp = patchmeta(fname)
1767 if create:
1767 if create:
1768 gp.op = 'ADD'
1768 gp.op = 'ADD'
1769 elif remove:
1769 elif remove:
1770 gp.op = 'DELETE'
1770 gp.op = 'DELETE'
1771 return gp
1771 return gp
1772
1772
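# Editor's summary of the op detection above, as a worked illustration (no new
# behaviour; the paths are made up):
#
#   --- /dev/null   +++ b/new.txt   @@ -0,0 +1,3 @@   ->  gp.op == 'ADD'
#   --- a/gone.txt  +++ /dev/null   @@ -1,3 +0,0 @@   ->  gp.op == 'DELETE'
#   --- a/f.txt     +++ b/f.txt     @@ -1,3 +1,4 @@   ->  plain modification
#
# plus the 'a/b/foo' vs 'a/b/foo.orig' backup case, where the original file is
# chosen as the patch target.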
1773 def scanpatch(fp):
1773 def scanpatch(fp):
1774 """like patch.iterhunks, but yield different events
1774 """like patch.iterhunks, but yield different events
1775
1775
1776 - ('file', [header_lines + fromfile + tofile])
1776 - ('file', [header_lines + fromfile + tofile])
1777 - ('context', [context_lines])
1777 - ('context', [context_lines])
1778 - ('hunk', [hunk_lines])
1778 - ('hunk', [hunk_lines])
1779 - ('range', (-start,len, +start,len, proc))
1779 - ('range', (-start,len, +start,len, proc))
1780 """
1780 """
1781 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1781 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1782 lr = linereader(fp)
1782 lr = linereader(fp)
1783
1783
1784 def scanwhile(first, p):
1784 def scanwhile(first, p):
1785 """scan lr while predicate holds"""
1785 """scan lr while predicate holds"""
1786 lines = [first]
1786 lines = [first]
1787 for line in iter(lr.readline, ''):
1787 for line in iter(lr.readline, ''):
1788 if p(line):
1788 if p(line):
1789 lines.append(line)
1789 lines.append(line)
1790 else:
1790 else:
1791 lr.push(line)
1791 lr.push(line)
1792 break
1792 break
1793 return lines
1793 return lines
1794
1794
1795 for line in iter(lr.readline, ''):
1795 for line in iter(lr.readline, ''):
1796 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1796 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1797 def notheader(line):
1797 def notheader(line):
1798 s = line.split(None, 1)
1798 s = line.split(None, 1)
1799 return not s or s[0] not in ('---', 'diff')
1799 return not s or s[0] not in ('---', 'diff')
1800 header = scanwhile(line, notheader)
1800 header = scanwhile(line, notheader)
1801 fromfile = lr.readline()
1801 fromfile = lr.readline()
1802 if fromfile.startswith('---'):
1802 if fromfile.startswith('---'):
1803 tofile = lr.readline()
1803 tofile = lr.readline()
1804 header += [fromfile, tofile]
1804 header += [fromfile, tofile]
1805 else:
1805 else:
1806 lr.push(fromfile)
1806 lr.push(fromfile)
1807 yield 'file', header
1807 yield 'file', header
1808 elif line.startswith(' '):
1808 elif line.startswith(' '):
1809 cs = (' ', '\\')
1809 cs = (' ', '\\')
1810 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1810 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1811 elif line.startswith(('-', '+')):
1811 elif line.startswith(('-', '+')):
1812 cs = ('-', '+', '\\')
1812 cs = ('-', '+', '\\')
1813 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1813 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1814 else:
1814 else:
1815 m = lines_re.match(line)
1815 m = lines_re.match(line)
1816 if m:
1816 if m:
1817 yield 'range', m.groups()
1817 yield 'range', m.groups()
1818 else:
1818 else:
1819 yield 'other', line
1819 yield 'other', line
1820
1820
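# Editor's trace of the events scanpatch() yields for a minimal diff
# (illustrative; list contents abbreviated):
#
#   diff -r 000000000000 -r 111111111111 foo
#   --- a/foo
#   +++ b/foo
#   @@ -1,2 +1,2 @@
#    context
#   -old
#   +new
#
# produces, in order:
#
#   ('file',    ['diff -r ... foo\n', '--- a/foo\n', '+++ b/foo\n'])
#   ('range',   ('1', '2', '1', '2', ''))
#   ('context', [' context\n'])
#   ('hunk',    ['-old\n', '+new\n'])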
1821 def scangitpatch(lr, firstline):
1821 def scangitpatch(lr, firstline):
1822 """
1822 """
1823 Git patches can emit:
1823 Git patches can emit:
1824 - rename a to b
1824 - rename a to b
1825 - change b
1825 - change b
1826 - copy a to c
1826 - copy a to c
1827 - change c
1827 - change c
1828
1828
1829 We cannot apply this sequence as-is: the renamed 'a' could not be
1829 We cannot apply this sequence as-is: the renamed 'a' could not be
1830 found, as it would have been renamed already. And we cannot copy
1830 found, as it would have been renamed already. And we cannot copy
1831 from 'b' instead because 'b' would have been changed already. So
1831 from 'b' instead because 'b' would have been changed already. So
1832 we scan the git patch for copy and rename commands so we can
1832 we scan the git patch for copy and rename commands so we can
1833 perform the copies ahead of time.
1833 perform the copies ahead of time.
1834 """
1834 """
1835 pos = 0
1835 pos = 0
1836 try:
1836 try:
1837 pos = lr.fp.tell()
1837 pos = lr.fp.tell()
1838 fp = lr.fp
1838 fp = lr.fp
1839 except IOError:
1839 except IOError:
1840 fp = stringio(lr.fp.read())
1840 fp = stringio(lr.fp.read())
1841 gitlr = linereader(fp)
1841 gitlr = linereader(fp)
1842 gitlr.push(firstline)
1842 gitlr.push(firstline)
1843 gitpatches = readgitpatch(gitlr)
1843 gitpatches = readgitpatch(gitlr)
1844 fp.seek(pos)
1844 fp.seek(pos)
1845 return gitpatches
1845 return gitpatches
1846
1846
1847 def iterhunks(fp):
1847 def iterhunks(fp):
1848 """Read a patch and yield the following events:
1848 """Read a patch and yield the following events:
1849 - ("file", afile, bfile, firsthunk): select a new target file.
1849 - ("file", afile, bfile, firsthunk): select a new target file.
1850 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1850 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1851 "file" event.
1851 "file" event.
1852 - ("git", gitchanges): current diff is in git format, gitchanges
1852 - ("git", gitchanges): current diff is in git format, gitchanges
1853 maps filenames to gitpatch records. Unique event.
1853 maps filenames to gitpatch records. Unique event.
1854 """
1854 """
1855 afile = ""
1855 afile = ""
1856 bfile = ""
1856 bfile = ""
1857 state = None
1857 state = None
1858 hunknum = 0
1858 hunknum = 0
1859 emitfile = newfile = False
1859 emitfile = newfile = False
1860 gitpatches = None
1860 gitpatches = None
1861
1861
1862 # our states
1862 # our states
1863 BFILE = 1
1863 BFILE = 1
1864 context = None
1864 context = None
1865 lr = linereader(fp)
1865 lr = linereader(fp)
1866
1866
1867 for x in iter(lr.readline, ''):
1867 for x in iter(lr.readline, ''):
1868 if state == BFILE and (
1868 if state == BFILE and (
1869 (not context and x.startswith('@'))
1869 (not context and x.startswith('@'))
1870 or (context is not False and x.startswith('***************'))
1870 or (context is not False and x.startswith('***************'))
1871 or x.startswith('GIT binary patch')):
1871 or x.startswith('GIT binary patch')):
1872 gp = None
1872 gp = None
1873 if (gitpatches and
1873 if (gitpatches and
1874 gitpatches[-1].ispatching(afile, bfile)):
1874 gitpatches[-1].ispatching(afile, bfile)):
1875 gp = gitpatches.pop()
1875 gp = gitpatches.pop()
1876 if x.startswith('GIT binary patch'):
1876 if x.startswith('GIT binary patch'):
1877 h = binhunk(lr, gp.path)
1877 h = binhunk(lr, gp.path)
1878 else:
1878 else:
1879 if context is None and x.startswith('***************'):
1879 if context is None and x.startswith('***************'):
1880 context = True
1880 context = True
1881 h = hunk(x, hunknum + 1, lr, context)
1881 h = hunk(x, hunknum + 1, lr, context)
1882 hunknum += 1
1882 hunknum += 1
1883 if emitfile:
1883 if emitfile:
1884 emitfile = False
1884 emitfile = False
1885 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1885 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1886 yield 'hunk', h
1886 yield 'hunk', h
1887 elif x.startswith('diff --git a/'):
1887 elif x.startswith('diff --git a/'):
1888 m = gitre.match(x.rstrip(' \r\n'))
1888 m = gitre.match(x.rstrip(' \r\n'))
1889 if not m:
1889 if not m:
1890 continue
1890 continue
1891 if gitpatches is None:
1891 if gitpatches is None:
1892 # scan whole input for git metadata
1892 # scan whole input for git metadata
1893 gitpatches = scangitpatch(lr, x)
1893 gitpatches = scangitpatch(lr, x)
1894 yield 'git', [g.copy() for g in gitpatches
1894 yield 'git', [g.copy() for g in gitpatches
1895 if g.op in ('COPY', 'RENAME')]
1895 if g.op in ('COPY', 'RENAME')]
1896 gitpatches.reverse()
1896 gitpatches.reverse()
1897 afile = 'a/' + m.group(1)
1897 afile = 'a/' + m.group(1)
1898 bfile = 'b/' + m.group(2)
1898 bfile = 'b/' + m.group(2)
1899 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1899 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1900 gp = gitpatches.pop()
1900 gp = gitpatches.pop()
1901 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1901 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1902 if not gitpatches:
1902 if not gitpatches:
1903 raise PatchError(_('failed to synchronize metadata for "%s"')
1903 raise PatchError(_('failed to synchronize metadata for "%s"')
1904 % afile[2:])
1904 % afile[2:])
1905 newfile = True
1905 newfile = True
1906 elif x.startswith('---'):
1906 elif x.startswith('---'):
1907 # check for a unified diff
1907 # check for a unified diff
1908 l2 = lr.readline()
1908 l2 = lr.readline()
1909 if not l2.startswith('+++'):
1909 if not l2.startswith('+++'):
1910 lr.push(l2)
1910 lr.push(l2)
1911 continue
1911 continue
1912 newfile = True
1912 newfile = True
1913 context = False
1913 context = False
1914 afile = parsefilename(x)
1914 afile = parsefilename(x)
1915 bfile = parsefilename(l2)
1915 bfile = parsefilename(l2)
1916 elif x.startswith('***'):
1916 elif x.startswith('***'):
1917 # check for a context diff
1917 # check for a context diff
1918 l2 = lr.readline()
1918 l2 = lr.readline()
1919 if not l2.startswith('---'):
1919 if not l2.startswith('---'):
1920 lr.push(l2)
1920 lr.push(l2)
1921 continue
1921 continue
1922 l3 = lr.readline()
1922 l3 = lr.readline()
1923 lr.push(l3)
1923 lr.push(l3)
1924 if not l3.startswith("***************"):
1924 if not l3.startswith("***************"):
1925 lr.push(l2)
1925 lr.push(l2)
1926 continue
1926 continue
1927 newfile = True
1927 newfile = True
1928 context = True
1928 context = True
1929 afile = parsefilename(x)
1929 afile = parsefilename(x)
1930 bfile = parsefilename(l2)
1930 bfile = parsefilename(l2)
1931
1931
1932 if newfile:
1932 if newfile:
1933 newfile = False
1933 newfile = False
1934 emitfile = True
1934 emitfile = True
1935 state = BFILE
1935 state = BFILE
1936 hunknum = 0
1936 hunknum = 0
1937
1937
1938 while gitpatches:
1938 while gitpatches:
1939 gp = gitpatches.pop()
1939 gp = gitpatches.pop()
1940 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1940 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1941
1941
1942 def applybindelta(binchunk, data):
1942 def applybindelta(binchunk, data):
1943 """Apply a binary delta hunk
1943 """Apply a binary delta hunk
1944 The algorithm used is the algorithm from git's patch-delta.c
1944 The algorithm used is the algorithm from git's patch-delta.c
1945 """
1945 """
1946 def deltahead(binchunk):
1946 def deltahead(binchunk):
1947 i = 0
1947 i = 0
1948 for c in pycompat.bytestr(binchunk):
1948 for c in pycompat.bytestr(binchunk):
1949 i += 1
1949 i += 1
1950 if not (ord(c) & 0x80):
1950 if not (ord(c) & 0x80):
1951 return i
1951 return i
1952 return i
1952 return i
1953 out = ""
1953 out = ""
1954 s = deltahead(binchunk)
1954 s = deltahead(binchunk)
1955 binchunk = binchunk[s:]
1955 binchunk = binchunk[s:]
1956 s = deltahead(binchunk)
1956 s = deltahead(binchunk)
1957 binchunk = binchunk[s:]
1957 binchunk = binchunk[s:]
1958 i = 0
1958 i = 0
1959 while i < len(binchunk):
1959 while i < len(binchunk):
1960 cmd = ord(binchunk[i:i + 1])
1960 cmd = ord(binchunk[i:i + 1])
1961 i += 1
1961 i += 1
1962 if (cmd & 0x80):
1962 if (cmd & 0x80):
1963 offset = 0
1963 offset = 0
1964 size = 0
1964 size = 0
1965 if (cmd & 0x01):
1965 if (cmd & 0x01):
1966 offset = ord(binchunk[i:i + 1])
1966 offset = ord(binchunk[i:i + 1])
1967 i += 1
1967 i += 1
1968 if (cmd & 0x02):
1968 if (cmd & 0x02):
1969 offset |= ord(binchunk[i:i + 1]) << 8
1969 offset |= ord(binchunk[i:i + 1]) << 8
1970 i += 1
1970 i += 1
1971 if (cmd & 0x04):
1971 if (cmd & 0x04):
1972 offset |= ord(binchunk[i:i + 1]) << 16
1972 offset |= ord(binchunk[i:i + 1]) << 16
1973 i += 1
1973 i += 1
1974 if (cmd & 0x08):
1974 if (cmd & 0x08):
1975 offset |= ord(binchunk[i:i + 1]) << 24
1975 offset |= ord(binchunk[i:i + 1]) << 24
1976 i += 1
1976 i += 1
1977 if (cmd & 0x10):
1977 if (cmd & 0x10):
1978 size = ord(binchunk[i:i + 1])
1978 size = ord(binchunk[i:i + 1])
1979 i += 1
1979 i += 1
1980 if (cmd & 0x20):
1980 if (cmd & 0x20):
1981 size |= ord(binchunk[i:i + 1]) << 8
1981 size |= ord(binchunk[i:i + 1]) << 8
1982 i += 1
1982 i += 1
1983 if (cmd & 0x40):
1983 if (cmd & 0x40):
1984 size |= ord(binchunk[i:i + 1]) << 16
1984 size |= ord(binchunk[i:i + 1]) << 16
1985 i += 1
1985 i += 1
1986 if size == 0:
1986 if size == 0:
1987 size = 0x10000
1987 size = 0x10000
1988 offset_end = offset + size
1988 offset_end = offset + size
1989 out += data[offset:offset_end]
1989 out += data[offset:offset_end]
1990 elif cmd != 0:
1990 elif cmd != 0:
1991 offset_end = i + cmd
1991 offset_end = i + cmd
1992 out += binchunk[i:offset_end]
1992 out += binchunk[i:offset_end]
1993 i += cmd
1993 i += cmd
1994 else:
1994 else:
1995 raise PatchError(_('unexpected delta opcode 0'))
1995 raise PatchError(_('unexpected delta opcode 0'))
1996 return out
1996 return out
1997
1997
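# Illustration (hedged sketch, not part of the original module): the delta
# consumed above starts with two varint-encoded sizes (source length, then
# result length).  After that, a command byte with the high bit set copies
# bytes out of the source data (its low bits select which offset/size bytes
# follow), while a non-zero command byte without the high bit inserts that
# many literal bytes.  Assuming Python 2 semantics (str is bytes, as used
# throughout this module):
#
#     src = 'the quick brown fox'
#     delta = chr(19) + chr(9)         # source and result lengths
#     delta += '\x91\x04\x05'          # copy 5 bytes from offset 4: 'quick'
#     delta += chr(4) + 'slow'         # insert the 4 literal bytes 'slow'
#     applybindelta(delta, src)        # -> 'quickslow'
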
1998 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1998 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1999 """Reads a patch from fp and tries to apply it.
1999 """Reads a patch from fp and tries to apply it.
2000
2000
2001 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2001 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2002 there was any fuzz.
2002 there was any fuzz.
2003
2003
2004 If 'eolmode' is 'strict', the patch content and patched file are
2004 If 'eolmode' is 'strict', the patch content and patched file are
2005 read in binary mode. Otherwise, line endings are ignored when
2005 read in binary mode. Otherwise, line endings are ignored when
2006 patching, then normalized according to 'eolmode'.
2006 patching, then normalized according to 'eolmode'.
2007 """
2007 """
2008 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2008 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2009 prefix=prefix, eolmode=eolmode)
2009 prefix=prefix, eolmode=eolmode)
2010
2010
2011 def _canonprefix(repo, prefix):
2011 def _canonprefix(repo, prefix):
2012 if prefix:
2012 if prefix:
2013 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2013 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2014 if prefix != '':
2014 if prefix != '':
2015 prefix += '/'
2015 prefix += '/'
2016 return prefix
2016 return prefix
2017
2017
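# Illustrative note (hedged): with the working directory at the repository
# root, _canonprefix(repo, 'lib') canonicalizes to 'lib/', while an empty
# prefix passes through unchanged as ''.
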
2018 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2018 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2019 eolmode='strict'):
2019 eolmode='strict'):
2020 prefix = _canonprefix(backend.repo, prefix)
2020 prefix = _canonprefix(backend.repo, prefix)
2021 def pstrip(p):
2021 def pstrip(p):
2022 return pathtransform(p, strip - 1, prefix)[1]
2022 return pathtransform(p, strip - 1, prefix)[1]
2023
2023
2024 rejects = 0
2024 rejects = 0
2025 err = 0
2025 err = 0
2026 current_file = None
2026 current_file = None
2027
2027
2028 for state, values in iterhunks(fp):
2028 for state, values in iterhunks(fp):
2029 if state == 'hunk':
2029 if state == 'hunk':
2030 if not current_file:
2030 if not current_file:
2031 continue
2031 continue
2032 ret = current_file.apply(values)
2032 ret = current_file.apply(values)
2033 if ret > 0:
2033 if ret > 0:
2034 err = 1
2034 err = 1
2035 elif state == 'file':
2035 elif state == 'file':
2036 if current_file:
2036 if current_file:
2037 rejects += current_file.close()
2037 rejects += current_file.close()
2038 current_file = None
2038 current_file = None
2039 afile, bfile, first_hunk, gp = values
2039 afile, bfile, first_hunk, gp = values
2040 if gp:
2040 if gp:
2041 gp.path = pstrip(gp.path)
2041 gp.path = pstrip(gp.path)
2042 if gp.oldpath:
2042 if gp.oldpath:
2043 gp.oldpath = pstrip(gp.oldpath)
2043 gp.oldpath = pstrip(gp.oldpath)
2044 else:
2044 else:
2045 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2045 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2046 prefix)
2046 prefix)
2047 if gp.op == 'RENAME':
2047 if gp.op == 'RENAME':
2048 backend.unlink(gp.oldpath)
2048 backend.unlink(gp.oldpath)
2049 if not first_hunk:
2049 if not first_hunk:
2050 if gp.op == 'DELETE':
2050 if gp.op == 'DELETE':
2051 backend.unlink(gp.path)
2051 backend.unlink(gp.path)
2052 continue
2052 continue
2053 data, mode = None, None
2053 data, mode = None, None
2054 if gp.op in ('RENAME', 'COPY'):
2054 if gp.op in ('RENAME', 'COPY'):
2055 data, mode = store.getfile(gp.oldpath)[:2]
2055 data, mode = store.getfile(gp.oldpath)[:2]
2056 if data is None:
2056 if data is None:
2057 # This means that the old path does not exist
2057 # This means that the old path does not exist
2058 raise PatchError(_("source file '%s' does not exist")
2058 raise PatchError(_("source file '%s' does not exist")
2059 % gp.oldpath)
2059 % gp.oldpath)
2060 if gp.mode:
2060 if gp.mode:
2061 mode = gp.mode
2061 mode = gp.mode
2062 if gp.op == 'ADD':
2062 if gp.op == 'ADD':
2063 # Added files without content have no hunk and
2063 # Added files without content have no hunk and
2064 # must be created
2064 # must be created
2065 data = ''
2065 data = ''
2066 if data or mode:
2066 if data or mode:
2067 if (gp.op in ('ADD', 'RENAME', 'COPY')
2067 if (gp.op in ('ADD', 'RENAME', 'COPY')
2068 and backend.exists(gp.path)):
2068 and backend.exists(gp.path)):
2069 raise PatchError(_("cannot create %s: destination "
2069 raise PatchError(_("cannot create %s: destination "
2070 "already exists") % gp.path)
2070 "already exists") % gp.path)
2071 backend.setfile(gp.path, data, mode, gp.oldpath)
2071 backend.setfile(gp.path, data, mode, gp.oldpath)
2072 continue
2072 continue
2073 try:
2073 try:
2074 current_file = patcher(ui, gp, backend, store,
2074 current_file = patcher(ui, gp, backend, store,
2075 eolmode=eolmode)
2075 eolmode=eolmode)
2076 except PatchError as inst:
2076 except PatchError as inst:
2077 ui.warn(str(inst) + '\n')
2077 ui.warn(str(inst) + '\n')
2078 current_file = None
2078 current_file = None
2079 rejects += 1
2079 rejects += 1
2080 continue
2080 continue
2081 elif state == 'git':
2081 elif state == 'git':
2082 for gp in values:
2082 for gp in values:
2083 path = pstrip(gp.oldpath)
2083 path = pstrip(gp.oldpath)
2084 data, mode = backend.getfile(path)
2084 data, mode = backend.getfile(path)
2085 if data is None:
2085 if data is None:
2086 # The error ignored here will trigger a getfile()
2086 # The error ignored here will trigger a getfile()
2087 # error in a place more appropriate for error
2087 # error in a place more appropriate for error
2088 # handling, and will not interrupt the patching
2088 # handling, and will not interrupt the patching
2089 # process.
2089 # process.
2090 pass
2090 pass
2091 else:
2091 else:
2092 store.setfile(path, data, mode)
2092 store.setfile(path, data, mode)
2093 else:
2093 else:
2094 raise error.Abort(_('unsupported parser state: %s') % state)
2094 raise error.Abort(_('unsupported parser state: %s') % state)
2095
2095
2096 if current_file:
2096 if current_file:
2097 rejects += current_file.close()
2097 rejects += current_file.close()
2098
2098
2099 if rejects:
2099 if rejects:
2100 return -1
2100 return -1
2101 return err
2101 return err
2102
2102
2103 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2103 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2104 similarity):
2104 similarity):
2105 """use <patcher> to apply <patchname> to the working directory.
2105 """use <patcher> to apply <patchname> to the working directory.
2106 returns whether patch was applied with fuzz factor."""
2106 returns whether patch was applied with fuzz factor."""
2107
2107
2108 fuzz = False
2108 fuzz = False
2109 args = []
2109 args = []
2110 cwd = repo.root
2110 cwd = repo.root
2111 if cwd:
2111 if cwd:
2112 args.append('-d %s' % procutil.shellquote(cwd))
2112 args.append('-d %s' % procutil.shellquote(cwd))
2113 cmd = ('%s %s -p%d < %s'
2113 cmd = ('%s %s -p%d < %s'
2114 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2114 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2115 ui.debug('Using external patch tool: %s\n' % cmd)
2115 ui.debug('Using external patch tool: %s\n' % cmd)
2116 fp = procutil.popen(cmd, 'rb')
2116 fp = procutil.popen(cmd, 'rb')
2117 try:
2117 try:
2118 for line in util.iterfile(fp):
2118 for line in util.iterfile(fp):
2119 line = line.rstrip()
2119 line = line.rstrip()
2120 ui.note(line + '\n')
2120 ui.note(line + '\n')
2121 if line.startswith('patching file '):
2121 if line.startswith('patching file '):
2122 pf = util.parsepatchoutput(line)
2122 pf = util.parsepatchoutput(line)
2123 printed_file = False
2123 printed_file = False
2124 files.add(pf)
2124 files.add(pf)
2125 elif line.find('with fuzz') >= 0:
2125 elif line.find('with fuzz') >= 0:
2126 fuzz = True
2126 fuzz = True
2127 if not printed_file:
2127 if not printed_file:
2128 ui.warn(pf + '\n')
2128 ui.warn(pf + '\n')
2129 printed_file = True
2129 printed_file = True
2130 ui.warn(line + '\n')
2130 ui.warn(line + '\n')
2131 elif line.find('saving rejects to file') >= 0:
2131 elif line.find('saving rejects to file') >= 0:
2132 ui.warn(line + '\n')
2132 ui.warn(line + '\n')
2133 elif line.find('FAILED') >= 0:
2133 elif line.find('FAILED') >= 0:
2134 if not printed_file:
2134 if not printed_file:
2135 ui.warn(pf + '\n')
2135 ui.warn(pf + '\n')
2136 printed_file = True
2136 printed_file = True
2137 ui.warn(line + '\n')
2137 ui.warn(line + '\n')
2138 finally:
2138 finally:
2139 if files:
2139 if files:
2140 scmutil.marktouched(repo, files, similarity)
2140 scmutil.marktouched(repo, files, similarity)
2141 code = fp.close()
2141 code = fp.close()
2142 if code:
2142 if code:
2143 raise PatchError(_("patch command failed: %s") %
2143 raise PatchError(_("patch command failed: %s") %
2144 procutil.explainexit(code))
2144 procutil.explainexit(code))
2145 return fuzz
2145 return fuzz
2146
2146
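# Illustrative note (hedged, paths hypothetical): with patcher='patch' and
# strip=1, the command built above is roughly
# 'patch -d <repo-root> -p1 < <patchfile>', with shellquote adding quoting
# only where a path needs it; the tool's output is then scanned for the
# 'with fuzz', 'saving rejects to file' and 'FAILED' markers.
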
2147 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2147 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2148 eolmode='strict'):
2148 eolmode='strict'):
2149 if files is None:
2149 if files is None:
2150 files = set()
2150 files = set()
2151 if eolmode is None:
2151 if eolmode is None:
2152 eolmode = ui.config('patch', 'eol')
2152 eolmode = ui.config('patch', 'eol')
2153 if eolmode.lower() not in eolmodes:
2153 if eolmode.lower() not in eolmodes:
2154 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2154 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2155 eolmode = eolmode.lower()
2155 eolmode = eolmode.lower()
2156
2156
2157 store = filestore()
2157 store = filestore()
2158 try:
2158 try:
2159 fp = open(patchobj, 'rb')
2159 fp = open(patchobj, 'rb')
2160 except TypeError:
2160 except TypeError:
2161 fp = patchobj
2161 fp = patchobj
2162 try:
2162 try:
2163 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2163 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2164 eolmode=eolmode)
2164 eolmode=eolmode)
2165 finally:
2165 finally:
2166 if fp != patchobj:
2166 if fp != patchobj:
2167 fp.close()
2167 fp.close()
2168 files.update(backend.close())
2168 files.update(backend.close())
2169 store.close()
2169 store.close()
2170 if ret < 0:
2170 if ret < 0:
2171 raise PatchError(_('patch failed to apply'))
2171 raise PatchError(_('patch failed to apply'))
2172 return ret > 0
2172 return ret > 0
2173
2173
2174 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2174 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2175 eolmode='strict', similarity=0):
2175 eolmode='strict', similarity=0):
2176 """use builtin patch to apply <patchobj> to the working directory.
2176 """use builtin patch to apply <patchobj> to the working directory.
2177 returns whether patch was applied with fuzz factor."""
2177 returns whether patch was applied with fuzz factor."""
2178 backend = workingbackend(ui, repo, similarity)
2178 backend = workingbackend(ui, repo, similarity)
2179 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2179 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2180
2180
2181 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2181 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2182 eolmode='strict'):
2182 eolmode='strict'):
2183 backend = repobackend(ui, repo, ctx, store)
2183 backend = repobackend(ui, repo, ctx, store)
2184 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2184 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2185
2185
2186 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2186 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2187 similarity=0):
2187 similarity=0):
2188 """Apply <patchname> to the working directory.
2188 """Apply <patchname> to the working directory.
2189
2189
2190 'eolmode' specifies how end of lines should be handled. It can be:
2190 'eolmode' specifies how end of lines should be handled. It can be:
2191 - 'strict': inputs are read in binary mode, EOLs are preserved
2191 - 'strict': inputs are read in binary mode, EOLs are preserved
2192 - 'crlf': EOLs are ignored when patching and reset to CRLF
2192 - 'crlf': EOLs are ignored when patching and reset to CRLF
2193 - 'lf': EOLs are ignored when patching and reset to LF
2193 - 'lf': EOLs are ignored when patching and reset to LF
2194 - None: get it from user settings, default to 'strict'
2194 - None: get it from user settings, default to 'strict'
2195 'eolmode' is ignored when using an external patcher program.
2195 'eolmode' is ignored when using an external patcher program.
2196
2196
2197 Returns whether patch was applied with fuzz factor.
2197 Returns whether patch was applied with fuzz factor.
2198 """
2198 """
2199 patcher = ui.config('ui', 'patch')
2199 patcher = ui.config('ui', 'patch')
2200 if files is None:
2200 if files is None:
2201 files = set()
2201 files = set()
2202 if patcher:
2202 if patcher:
2203 return _externalpatch(ui, repo, patcher, patchname, strip,
2203 return _externalpatch(ui, repo, patcher, patchname, strip,
2204 files, similarity)
2204 files, similarity)
2205 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2205 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2206 similarity)
2206 similarity)
2207
2207
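# Hedged usage sketch (repository and patch paths are hypothetical; these
# are byte strings under the Python 3 codebase):
#
#     from mercurial import hg, ui as uimod
#
#     ui = uimod.ui.load()
#     repo = hg.repository(ui, '/path/to/repo')
#     touched = set()
#     fuzz = patch(ui, repo, '/tmp/fix.patch', strip=1, files=touched)
#     # 'touched' now holds the affected paths; 'fuzz' is True if any hunk
#     # applied only with fuzz.
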
2208 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2208 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2209 backend = fsbackend(ui, repo.root)
2209 backend = fsbackend(ui, repo.root)
2210 prefix = _canonprefix(repo, prefix)
2210 prefix = _canonprefix(repo, prefix)
2211 with open(patchpath, 'rb') as fp:
2211 with open(patchpath, 'rb') as fp:
2212 changed = set()
2212 changed = set()
2213 for state, values in iterhunks(fp):
2213 for state, values in iterhunks(fp):
2214 if state == 'file':
2214 if state == 'file':
2215 afile, bfile, first_hunk, gp = values
2215 afile, bfile, first_hunk, gp = values
2216 if gp:
2216 if gp:
2217 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2217 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2218 if gp.oldpath:
2218 if gp.oldpath:
2219 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2219 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2220 prefix)[1]
2220 prefix)[1]
2221 else:
2221 else:
2222 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2222 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2223 prefix)
2223 prefix)
2224 changed.add(gp.path)
2224 changed.add(gp.path)
2225 if gp.op == 'RENAME':
2225 if gp.op == 'RENAME':
2226 changed.add(gp.oldpath)
2226 changed.add(gp.oldpath)
2227 elif state not in ('hunk', 'git'):
2227 elif state not in ('hunk', 'git'):
2228 raise error.Abort(_('unsupported parser state: %s') % state)
2228 raise error.Abort(_('unsupported parser state: %s') % state)
2229 return changed
2229 return changed
2230
2230
2231 class GitDiffRequired(Exception):
2231 class GitDiffRequired(Exception):
2232 pass
2232 pass
2233
2233
2234 diffopts = diffutil.diffallopts
2234 diffopts = diffutil.diffallopts
2235 diffallopts = diffutil.diffallopts
2235 diffallopts = diffutil.diffallopts
2236 difffeatureopts = diffutil.difffeatureopts
2236 difffeatureopts = diffutil.difffeatureopts
2237
2237
2238 def diff(repo, node1=None, node2=None, match=None, changes=None,
2238 def diff(repo, node1=None, node2=None, match=None, changes=None,
2239 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2239 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2240 hunksfilterfn=None):
2240 hunksfilterfn=None):
2241 '''yields diff of changes to files between two nodes, or node and
2241 '''yields diff of changes to files between two nodes, or node and
2242 working directory.
2242 working directory.
2243
2243
2244 if node1 is None, use first dirstate parent instead.
2244 if node1 is None, use first dirstate parent instead.
2245 if node2 is None, compare node1 with working directory.
2245 if node2 is None, compare node1 with working directory.
2246
2246
2247 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2247 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2248 every time some change cannot be represented with the current
2248 every time some change cannot be represented with the current
2249 patch format. Return False to upgrade to git patch format, True to
2249 patch format. Return False to upgrade to git patch format, True to
2250 accept the loss or raise an exception to abort the diff. It is
2250 accept the loss or raise an exception to abort the diff. It is
2251 called with the name of the current file being diffed as 'fn'. If set
2251 called with the name of the current file being diffed as 'fn'. If set
2252 to None, patches will always be upgraded to git format when
2252 to None, patches will always be upgraded to git format when
2253 necessary.
2253 necessary.
2254
2254
2255 prefix is a filename prefix that is prepended to all filenames on
2255 prefix is a filename prefix that is prepended to all filenames on
2256 display (used for subrepos).
2256 display (used for subrepos).
2257
2257
2258 relroot, if not empty, must be normalized with a trailing /. Any match
2258 relroot, if not empty, must be normalized with a trailing /. Any match
2259 patterns that fall outside it will be ignored.
2259 patterns that fall outside it will be ignored.
2260
2260
2261 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2261 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2262 information.
2262 information.
2263
2263
2264 hunksfilterfn, if not None, should be a function taking a filectx and
2264 hunksfilterfn, if not None, should be a function taking a filectx and
2265 hunks generator that may yield filtered hunks.
2265 hunks generator that may yield filtered hunks.
2266 '''
2266 '''
2267 for fctx1, fctx2, hdr, hunks in diffhunks(
2267 for fctx1, fctx2, hdr, hunks in diffhunks(
2268 repo, node1=node1, node2=node2,
2268 repo, node1=node1, node2=node2,
2269 match=match, changes=changes, opts=opts,
2269 match=match, changes=changes, opts=opts,
2270 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2270 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2271 ):
2271 ):
2272 if hunksfilterfn is not None:
2272 if hunksfilterfn is not None:
2273 # If the file has been removed, fctx2 is None; but this should
2273 # If the file has been removed, fctx2 is None; but this should
2274 # not occur here since we catch removed files early in
2274 # not occur here since we catch removed files early in
2275 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2275 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2276 assert fctx2 is not None, \
2276 assert fctx2 is not None, \
2277 'fctx2 unexpectedly None in diff hunks filtering'
2277 'fctx2 unexpectedly None in diff hunks filtering'
2278 hunks = hunksfilterfn(fctx2, hunks)
2278 hunks = hunksfilterfn(fctx2, hunks)
2279 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2279 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2280 if hdr and (text or len(hdr) > 1):
2280 if hdr and (text or len(hdr) > 1):
2281 yield '\n'.join(hdr) + '\n'
2281 yield '\n'.join(hdr) + '\n'
2282 if text:
2282 if text:
2283 yield text
2283 yield text
2284
2284
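# Hedged usage sketch: with no nodes given, this diffs the working
# directory against its first parent; 'diffallopts' (defined earlier in
# this module) builds the diff options from the ui configuration.
#
#     for chunk in diff(repo, opts=diffallopts(repo.ui)):
#         repo.ui.write(chunk)
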
2285 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2285 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2286 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2286 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2287 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2287 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2288 where `header` is a list of diff headers and `hunks` is an iterable of
2288 where `header` is a list of diff headers and `hunks` is an iterable of
2289 (`hunkrange`, `hunklines`) tuples.
2289 (`hunkrange`, `hunklines`) tuples.
2290
2290
2291 See diff() for the meaning of parameters.
2291 See diff() for the meaning of parameters.
2292 """
2292 """
2293
2293
2294 if opts is None:
2294 if opts is None:
2295 opts = mdiff.defaultopts
2295 opts = mdiff.defaultopts
2296
2296
2297 if not node1 and not node2:
2297 if not node1 and not node2:
2298 node1 = repo.dirstate.p1()
2298 node1 = repo.dirstate.p1()
2299
2299
2300 def lrugetfilectx():
2300 def lrugetfilectx():
2301 cache = {}
2301 cache = {}
2302 order = collections.deque()
2302 order = collections.deque()
2303 def getfilectx(f, ctx):
2303 def getfilectx(f, ctx):
2304 fctx = ctx.filectx(f, filelog=cache.get(f))
2304 fctx = ctx.filectx(f, filelog=cache.get(f))
2305 if f not in cache:
2305 if f not in cache:
2306 if len(cache) > 20:
2306 if len(cache) > 20:
2307 del cache[order.popleft()]
2307 del cache[order.popleft()]
2308 cache[f] = fctx.filelog()
2308 cache[f] = fctx.filelog()
2309 else:
2309 else:
2310 order.remove(f)
2310 order.remove(f)
2311 order.append(f)
2311 order.append(f)
2312 return fctx
2312 return fctx
2313 return getfilectx
2313 return getfilectx
2314 getfilectx = lrugetfilectx()
2314 getfilectx = lrugetfilectx()
2315
2315
2316 ctx1 = repo[node1]
2316 ctx1 = repo[node1]
2317 ctx2 = repo[node2]
2317 ctx2 = repo[node2]
2318
2318
2319 relfiltered = False
2319 relfiltered = False
2320 if relroot != '' and match.always():
2320 if relroot != '' and match.always():
2321 # as a special case, create a new matcher with just the relroot
2321 # as a special case, create a new matcher with just the relroot
2322 pats = [relroot]
2322 pats = [relroot]
2323 match = scmutil.match(ctx2, pats, default='path')
2323 match = scmutil.match(ctx2, pats, default='path')
2324 relfiltered = True
2324 relfiltered = True
2325
2325
2326 if not changes:
2326 if not changes:
2327 changes = ctx1.status(ctx2, match=match)
2327 changes = ctx1.status(ctx2, match=match)
2328 modified, added, removed = changes[:3]
2328 modified, added, removed = changes[:3]
2329
2329
2330 if not modified and not added and not removed:
2330 if not modified and not added and not removed:
2331 return []
2331 return []
2332
2332
2333 if repo.ui.debugflag:
2333 if repo.ui.debugflag:
2334 hexfunc = hex
2334 hexfunc = hex
2335 else:
2335 else:
2336 hexfunc = short
2336 hexfunc = short
2337 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2337 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2338
2338
2339 if copy is None:
2339 if copy is None:
2340 copy = {}
2340 copy = {}
2341 if opts.git or opts.upgrade:
2341 if opts.git or opts.upgrade:
2342 copy = copies.pathcopies(ctx1, ctx2, match=match)
2342 copy = copies.pathcopies(ctx1, ctx2, match=match)
2343
2343
2344 if relroot is not None:
2344 if relroot is not None:
2345 if not relfiltered:
2345 if not relfiltered:
2346 # XXX this would ideally be done in the matcher, but that is
2346 # XXX this would ideally be done in the matcher, but that is
2347 # generally meant to 'or' patterns, not 'and' them. In this case we
2347 # generally meant to 'or' patterns, not 'and' them. In this case we
2348 # need to 'and' all the patterns from the matcher with relroot.
2348 # need to 'and' all the patterns from the matcher with relroot.
2349 def filterrel(l):
2349 def filterrel(l):
2350 return [f for f in l if f.startswith(relroot)]
2350 return [f for f in l if f.startswith(relroot)]
2351 modified = filterrel(modified)
2351 modified = filterrel(modified)
2352 added = filterrel(added)
2352 added = filterrel(added)
2353 removed = filterrel(removed)
2353 removed = filterrel(removed)
2354 # filter out copies where either side isn't inside the relative root
2354 # filter out copies where either side isn't inside the relative root
2355 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2355 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2356 if dst.startswith(relroot)
2356 if dst.startswith(relroot)
2357 and src.startswith(relroot)))
2357 and src.startswith(relroot)))
2358
2358
2359 modifiedset = set(modified)
2359 modifiedset = set(modified)
2360 addedset = set(added)
2360 addedset = set(added)
2361 removedset = set(removed)
2361 removedset = set(removed)
2362 for f in modified:
2362 for f in modified:
2363 if f not in ctx1:
2363 if f not in ctx1:
2364 # Fix up added, since merged-in additions appear as
2364 # Fix up added, since merged-in additions appear as
2365 # modifications during merges
2365 # modifications during merges
2366 modifiedset.remove(f)
2366 modifiedset.remove(f)
2367 addedset.add(f)
2367 addedset.add(f)
2368 for f in removed:
2368 for f in removed:
2369 if f not in ctx1:
2369 if f not in ctx1:
2370 # Merged-in additions that are then removed are reported as removed.
2370 # Merged-in additions that are then removed are reported as removed.
2371 # They are not in ctx1, so we don't want to show them in the diff.
2371 # They are not in ctx1, so we don't want to show them in the diff.
2372 removedset.remove(f)
2372 removedset.remove(f)
2373 modified = sorted(modifiedset)
2373 modified = sorted(modifiedset)
2374 added = sorted(addedset)
2374 added = sorted(addedset)
2375 removed = sorted(removedset)
2375 removed = sorted(removedset)
2376 for dst, src in list(copy.items()):
2376 for dst, src in list(copy.items()):
2377 if src not in ctx1:
2377 if src not in ctx1:
2378 # Files merged in during a merge and then copied/renamed are
2378 # Files merged in during a merge and then copied/renamed are
2379 # reported as copies. We want to show them in the diff as additions.
2379 # reported as copies. We want to show them in the diff as additions.
2380 del copy[dst]
2380 del copy[dst]
2381
2381
2382 prefetchmatch = scmutil.matchfiles(
2382 prefetchmatch = scmutil.matchfiles(
2383 repo, list(modifiedset | addedset | removedset))
2383 repo, list(modifiedset | addedset | removedset))
2384 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2384 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2385
2385
2386 def difffn(opts, losedata):
2386 def difffn(opts, losedata):
2387 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2387 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2388 copy, getfilectx, opts, losedata, prefix, relroot)
2388 copy, getfilectx, opts, losedata, prefix, relroot)
2389 if opts.upgrade and not opts.git:
2389 if opts.upgrade and not opts.git:
2390 try:
2390 try:
2391 def losedata(fn):
2391 def losedata(fn):
2392 if not losedatafn or not losedatafn(fn=fn):
2392 if not losedatafn or not losedatafn(fn=fn):
2393 raise GitDiffRequired
2393 raise GitDiffRequired
2394 # Buffer the whole output until we are sure it can be generated
2394 # Buffer the whole output until we are sure it can be generated
2395 return list(difffn(opts.copy(git=False), losedata))
2395 return list(difffn(opts.copy(git=False), losedata))
2396 except GitDiffRequired:
2396 except GitDiffRequired:
2397 return difffn(opts.copy(git=True), None)
2397 return difffn(opts.copy(git=True), None)
2398 else:
2398 else:
2399 return difffn(opts, None)
2399 return difffn(opts, None)
2400
2400
2401 def diffsinglehunk(hunklines):
2401 def diffsinglehunk(hunklines):
2402 """yield tokens for a list of lines in a single hunk"""
2402 """yield tokens for a list of lines in a single hunk"""
2403 for line in hunklines:
2403 for line in hunklines:
2404 # chomp
2404 # chomp
2405 chompline = line.rstrip('\r\n')
2405 chompline = line.rstrip('\r\n')
2406 # highlight tabs and trailing whitespace
2406 # highlight tabs and trailing whitespace
2407 stripline = chompline.rstrip()
2407 stripline = chompline.rstrip()
2408 if line.startswith('-'):
2408 if line.startswith('-'):
2409 label = 'diff.deleted'
2409 label = 'diff.deleted'
2410 elif line.startswith('+'):
2410 elif line.startswith('+'):
2411 label = 'diff.inserted'
2411 label = 'diff.inserted'
2412 else:
2412 else:
2413 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2413 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2414 for token in tabsplitter.findall(stripline):
2414 for token in tabsplitter.findall(stripline):
2415 if token.startswith('\t'):
2415 if token.startswith('\t'):
2416 yield (token, 'diff.tab')
2416 yield (token, 'diff.tab')
2417 else:
2417 else:
2418 yield (token, label)
2418 yield (token, label)
2419
2419
2420 if chompline != stripline:
2420 if chompline != stripline:
2421 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2421 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2422 if chompline != line:
2422 if chompline != line:
2423 yield (line[len(chompline):], '')
2423 yield (line[len(chompline):], '')
2424
2424
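# Illustrative walkthrough (hedged): for
# hunklines = ['-old\tvalue\n', '+new value  \n'] the generator above
# yields ('-old', 'diff.deleted'), ('\t', 'diff.tab'),
# ('value', 'diff.deleted'), ('\n', ''), ('+new value', 'diff.inserted'),
# ('  ', 'diff.trailingwhitespace') and ('\n', '').
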
2425 def diffsinglehunkinline(hunklines):
2425 def diffsinglehunkinline(hunklines):
2426 """yield tokens for a list of lines in a single hunk, with inline colors"""
2426 """yield tokens for a list of lines in a single hunk, with inline colors"""
2427 # prepare deleted, and inserted content
2427 # prepare deleted, and inserted content
2428 a = ''
2428 a = ''
2429 b = ''
2429 b = ''
2430 for line in hunklines:
2430 for line in hunklines:
2431 if line[0:1] == '-':
2431 if line[0:1] == '-':
2432 a += line[1:]
2432 a += line[1:]
2433 elif line[0:1] == '+':
2433 elif line[0:1] == '+':
2434 b += line[1:]
2434 b += line[1:]
2435 else:
2435 else:
2436 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2436 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2437 # fast path: if either side is empty, use diffsinglehunk
2437 # fast path: if either side is empty, use diffsinglehunk
2438 if not a or not b:
2438 if not a or not b:
2439 for t in diffsinglehunk(hunklines):
2439 for t in diffsinglehunk(hunklines):
2440 yield t
2440 yield t
2441 return
2441 return
2442 # re-split the content into words
2442 # re-split the content into words
2443 al = wordsplitter.findall(a)
2443 al = wordsplitter.findall(a)
2444 bl = wordsplitter.findall(b)
2444 bl = wordsplitter.findall(b)
2445 # re-arrange the words to lines since the diff algorithm is line-based
2445 # re-arrange the words to lines since the diff algorithm is line-based
2446 aln = [s if s == '\n' else s + '\n' for s in al]
2446 aln = [s if s == '\n' else s + '\n' for s in al]
2447 bln = [s if s == '\n' else s + '\n' for s in bl]
2447 bln = [s if s == '\n' else s + '\n' for s in bl]
2448 an = ''.join(aln)
2448 an = ''.join(aln)
2449 bn = ''.join(bln)
2449 bn = ''.join(bln)
2450 # run the diff algorithm, prepare atokens and btokens
2450 # run the diff algorithm, prepare atokens and btokens
2451 atokens = []
2451 atokens = []
2452 btokens = []
2452 btokens = []
2453 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2453 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2454 for (a1, a2, b1, b2), btype in blocks:
2454 for (a1, a2, b1, b2), btype in blocks:
2455 changed = btype == '!'
2455 changed = btype == '!'
2456 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2456 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2457 atokens.append((changed, token))
2457 atokens.append((changed, token))
2458 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2458 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2459 btokens.append((changed, token))
2459 btokens.append((changed, token))
2460
2460
2461 # yield deleted tokens, then inserted ones
2461 # yield deleted tokens, then inserted ones
2462 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2462 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2463 ('+', 'diff.inserted', btokens)]:
2463 ('+', 'diff.inserted', btokens)]:
2464 nextisnewline = True
2464 nextisnewline = True
2465 for changed, token in tokens:
2465 for changed, token in tokens:
2466 if nextisnewline:
2466 if nextisnewline:
2467 yield (prefix, label)
2467 yield (prefix, label)
2468 nextisnewline = False
2468 nextisnewline = False
2469 # special handling of the line end
2469 # special handling of the line end
2470 isendofline = token.endswith('\n')
2470 isendofline = token.endswith('\n')
2471 if isendofline:
2471 if isendofline:
2472 chomp = token[:-1] # chomp
2472 chomp = token[:-1] # chomp
2473 if chomp.endswith('\r'):
2473 if chomp.endswith('\r'):
2474 chomp = chomp[:-1]
2474 chomp = chomp[:-1]
2475 endofline = token[len(chomp):]
2475 endofline = token[len(chomp):]
2476 token = chomp.rstrip() # detect spaces at the end
2476 token = chomp.rstrip() # detect spaces at the end
2477 endspaces = chomp[len(token):]
2477 endspaces = chomp[len(token):]
2478 # scan tabs
2478 # scan tabs
2479 for maybetab in tabsplitter.findall(token):
2479 for maybetab in tabsplitter.findall(token):
2480 if b'\t' == maybetab[0:1]:
2480 if b'\t' == maybetab[0:1]:
2481 currentlabel = 'diff.tab'
2481 currentlabel = 'diff.tab'
2482 else:
2482 else:
2483 if changed:
2483 if changed:
2484 currentlabel = label + '.changed'
2484 currentlabel = label + '.changed'
2485 else:
2485 else:
2486 currentlabel = label + '.unchanged'
2486 currentlabel = label + '.unchanged'
2487 yield (maybetab, currentlabel)
2487 yield (maybetab, currentlabel)
2488 if isendofline:
2488 if isendofline:
2489 if endspaces:
2489 if endspaces:
2490 yield (endspaces, 'diff.trailingwhitespace')
2490 yield (endspaces, 'diff.trailingwhitespace')
2491 yield (endofline, '')
2491 yield (endofline, '')
2492 nextisnewline = True
2492 nextisnewline = True
2493
2493
2494 def difflabel(func, *args, **kw):
2494 def difflabel(func, *args, **kw):
2495 '''yields 2-tuples of (output, label) based on the output of func()'''
2495 '''yields 2-tuples of (output, label) based on the output of func()'''
2496 if kw.get(r'opts') and kw[r'opts'].worddiff:
2496 if kw.get(r'opts') and kw[r'opts'].worddiff:
2497 dodiffhunk = diffsinglehunkinline
2497 dodiffhunk = diffsinglehunkinline
2498 else:
2498 else:
2499 dodiffhunk = diffsinglehunk
2499 dodiffhunk = diffsinglehunk
2500 headprefixes = [('diff', 'diff.diffline'),
2500 headprefixes = [('diff', 'diff.diffline'),
2501 ('copy', 'diff.extended'),
2501 ('copy', 'diff.extended'),
2502 ('rename', 'diff.extended'),
2502 ('rename', 'diff.extended'),
2503 ('old', 'diff.extended'),
2503 ('old', 'diff.extended'),
2504 ('new', 'diff.extended'),
2504 ('new', 'diff.extended'),
2505 ('deleted', 'diff.extended'),
2505 ('deleted', 'diff.extended'),
2506 ('index', 'diff.extended'),
2506 ('index', 'diff.extended'),
2507 ('similarity', 'diff.extended'),
2507 ('similarity', 'diff.extended'),
2508 ('---', 'diff.file_a'),
2508 ('---', 'diff.file_a'),
2509 ('+++', 'diff.file_b')]
2509 ('+++', 'diff.file_b')]
2510 textprefixes = [('@', 'diff.hunk'),
2510 textprefixes = [('@', 'diff.hunk'),
2511 # - and + are handled by diffsinglehunk
2511 # - and + are handled by diffsinglehunk
2512 ]
2512 ]
2513 head = False
2513 head = False
2514
2514
2515 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2515 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2516 hunkbuffer = []
2516 hunkbuffer = []
2517 def consumehunkbuffer():
2517 def consumehunkbuffer():
2518 if hunkbuffer:
2518 if hunkbuffer:
2519 for token in dodiffhunk(hunkbuffer):
2519 for token in dodiffhunk(hunkbuffer):
2520 yield token
2520 yield token
2521 hunkbuffer[:] = []
2521 hunkbuffer[:] = []
2522
2522
2523 for chunk in func(*args, **kw):
2523 for chunk in func(*args, **kw):
2524 lines = chunk.split('\n')
2524 lines = chunk.split('\n')
2525 linecount = len(lines)
2525 linecount = len(lines)
2526 for i, line in enumerate(lines):
2526 for i, line in enumerate(lines):
2527 if head:
2527 if head:
2528 if line.startswith('@'):
2528 if line.startswith('@'):
2529 head = False
2529 head = False
2530 else:
2530 else:
2531 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2531 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2532 head = True
2532 head = True
2533 diffline = False
2533 diffline = False
2534 if not head and line and line.startswith(('+', '-')):
2534 if not head and line and line.startswith(('+', '-')):
2535 diffline = True
2535 diffline = True
2536
2536
2537 prefixes = textprefixes
2537 prefixes = textprefixes
2538 if head:
2538 if head:
2539 prefixes = headprefixes
2539 prefixes = headprefixes
2540 if diffline:
2540 if diffline:
2541 # buffered
2541 # buffered
2542 bufferedline = line
2542 bufferedline = line
2543 if i + 1 < linecount:
2543 if i + 1 < linecount:
2544 bufferedline += "\n"
2544 bufferedline += "\n"
2545 hunkbuffer.append(bufferedline)
2545 hunkbuffer.append(bufferedline)
2546 else:
2546 else:
2547 # unbuffered
2547 # unbuffered
2548 for token in consumehunkbuffer():
2548 for token in consumehunkbuffer():
2549 yield token
2549 yield token
2550 stripline = line.rstrip()
2550 stripline = line.rstrip()
2551 for prefix, label in prefixes:
2551 for prefix, label in prefixes:
2552 if stripline.startswith(prefix):
2552 if stripline.startswith(prefix):
2553 yield (stripline, label)
2553 yield (stripline, label)
2554 if line != stripline:
2554 if line != stripline:
2555 yield (line[len(stripline):],
2555 yield (line[len(stripline):],
2556 'diff.trailingwhitespace')
2556 'diff.trailingwhitespace')
2557 break
2557 break
2558 else:
2558 else:
2559 yield (line, '')
2559 yield (line, '')
2560 if i + 1 < linecount:
2560 if i + 1 < linecount:
2561 yield ('\n', '')
2561 yield ('\n', '')
2562 for token in consumehunkbuffer():
2562 for token in consumehunkbuffer():
2563 yield token
2563 yield token
2564
2564
2565 def diffui(*args, **kw):
2565 def diffui(*args, **kw):
2566 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2566 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2567 return difflabel(diff, *args, **kw)
2567 return difflabel(diff, *args, **kw)
2568
2568
2569 def _filepairs(modified, added, removed, copy, opts):
2569 def _filepairs(modified, added, removed, copy, opts):
2570 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2570 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2571 before and f2 is the name after. For added files, f1 will be None,
2571 before and f2 is the name after. For added files, f1 will be None,
2572 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2572 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2573 or 'rename' (the latter two only if opts.git is set).'''
2573 or 'rename' (the latter two only if opts.git is set).'''
2574 gone = set()
2574 gone = set()
2575
2575
2576 copyto = dict([(v, k) for k, v in copy.items()])
2576 copyto = dict([(v, k) for k, v in copy.items()])
2577
2577
2578 addedset, removedset = set(added), set(removed)
2578 addedset, removedset = set(added), set(removed)
2579
2579
2580 for f in sorted(modified + added + removed):
2580 for f in sorted(modified + added + removed):
2581 copyop = None
2581 copyop = None
2582 f1, f2 = f, f
2582 f1, f2 = f, f
2583 if f in addedset:
2583 if f in addedset:
2584 f1 = None
2584 f1 = None
2585 if f in copy:
2585 if f in copy:
2586 if opts.git:
2586 if opts.git:
2587 f1 = copy[f]
2587 f1 = copy[f]
2588 if f1 in removedset and f1 not in gone:
2588 if f1 in removedset and f1 not in gone:
2589 copyop = 'rename'
2589 copyop = 'rename'
2590 gone.add(f1)
2590 gone.add(f1)
2591 else:
2591 else:
2592 copyop = 'copy'
2592 copyop = 'copy'
2593 elif f in removedset:
2593 elif f in removedset:
2594 f2 = None
2594 f2 = None
2595 if opts.git:
2595 if opts.git:
2596 # have we already reported a copy above?
2596 # have we already reported a copy above?
2597 if (f in copyto and copyto[f] in addedset
2597 if (f in copyto and copyto[f] in addedset
2598 and copy[copyto[f]] == f):
2598 and copy[copyto[f]] == f):
2599 continue
2599 continue
2600 yield f1, f2, copyop
2600 yield f1, f2, copyop
2601
2601
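# Illustrative walkthrough (hedged): with modified=['m'], added=['new'],
# removed=['old'], copy={'new': 'old'} and opts.git set, this yields
# ('m', 'm', None) followed by ('old', 'new', 'rename'); the removed 'old'
# is then skipped because the rename already accounted for it.
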
2602 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2602 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2603 copy, getfilectx, opts, losedatafn, prefix, relroot):
2603 copy, getfilectx, opts, losedatafn, prefix, relroot):
2604 '''given input data, generate a diff and yield it in blocks
2604 '''given input data, generate a diff and yield it in blocks
2605
2605
2606 If generating a diff would lose data like flags or binary data and
2606 If generating a diff would lose data like flags or binary data and
2607 losedatafn is not None, it will be called.
2607 losedatafn is not None, it will be called.
2608
2608
2609 relroot is removed and prefix is added to every path in the diff output.
2609 relroot is removed and prefix is added to every path in the diff output.
2610
2610
2611 If relroot is not empty, this function expects every path in modified,
2611 If relroot is not empty, this function expects every path in modified,
2612 added, removed and copy to start with it.'''
2612 added, removed and copy to start with it.'''
2613
2613
2614 def gitindex(text):
2614 def gitindex(text):
2615 if not text:
2615 if not text:
2616 text = ""
2616 text = ""
2617 l = len(text)
2617 l = len(text)
2618 s = hashlib.sha1('blob %d\0' % l)
2618 s = hashlib.sha1('blob %d\0' % l)
2619 s.update(text)
2619 s.update(text)
2620 return hex(s.digest())
2620 return hex(s.digest())
2621
2621
2622 if opts.noprefix:
2622 if opts.noprefix:
2623 aprefix = bprefix = ''
2623 aprefix = bprefix = ''
2624 else:
2624 else:
2625 aprefix = 'a/'
2625 aprefix = 'a/'
2626 bprefix = 'b/'
2626 bprefix = 'b/'
2627
2627
2628 def diffline(f, revs):
2628 def diffline(f, revs):
2629 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2629 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2630 return 'diff %s %s' % (revinfo, f)
2630 return 'diff %s %s' % (revinfo, f)
2631
2631
2632 def isempty(fctx):
2632 def isempty(fctx):
2633 return fctx is None or fctx.size() == 0
2633 return fctx is None or fctx.size() == 0
2634
2634
2635 date1 = dateutil.datestr(ctx1.date())
2635 date1 = dateutil.datestr(ctx1.date())
2636 date2 = dateutil.datestr(ctx2.date())
2636 date2 = dateutil.datestr(ctx2.date())
2637
2637
2638 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2638 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2639
2639
2640 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2640 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2641 or repo.ui.configbool('devel', 'check-relroot')):
2641 or repo.ui.configbool('devel', 'check-relroot')):
2642 for f in modified + added + removed + list(copy) + list(copy.values()):
2642 for f in modified + added + removed + list(copy) + list(copy.values()):
2643 if f is not None and not f.startswith(relroot):
2643 if f is not None and not f.startswith(relroot):
2644 raise AssertionError(
2644 raise AssertionError(
2645 "file %s doesn't start with relroot %s" % (f, relroot))
2645 "file %s doesn't start with relroot %s" % (f, relroot))
2646
2646
2647 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2647 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2648 content1 = None
2648 content1 = None
2649 content2 = None
2649 content2 = None
2650 fctx1 = None
2650 fctx1 = None
2651 fctx2 = None
2651 fctx2 = None
2652 flag1 = None
2652 flag1 = None
2653 flag2 = None
2653 flag2 = None
2654 if f1:
2654 if f1:
2655 fctx1 = getfilectx(f1, ctx1)
2655 fctx1 = getfilectx(f1, ctx1)
2656 if opts.git or losedatafn:
2656 if opts.git or losedatafn:
2657 flag1 = ctx1.flags(f1)
2657 flag1 = ctx1.flags(f1)
2658 if f2:
2658 if f2:
2659 fctx2 = getfilectx(f2, ctx2)
2659 fctx2 = getfilectx(f2, ctx2)
2660 if opts.git or losedatafn:
2660 if opts.git or losedatafn:
2661 flag2 = ctx2.flags(f2)
2661 flag2 = ctx2.flags(f2)
2662 # if binary is True, output "summary" or "base85", but not "text diff"
2662 # if binary is True, output "summary" or "base85", but not "text diff"
2663 if opts.text:
2663 if opts.text:
2664 binary = False
2664 binary = False
2665 else:
2665 else:
2666 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2666 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2667
2667
2668 if losedatafn and not opts.git:
2668 if losedatafn and not opts.git:
2669 if (binary or
2669 if (binary or
2670 # copy/rename
2670 # copy/rename
2671 f2 in copy or
2671 f2 in copy or
2672 # empty file creation
2672 # empty file creation
2673 (not f1 and isempty(fctx2)) or
2673 (not f1 and isempty(fctx2)) or
2674 # empty file deletion
2674 # empty file deletion
2675 (isempty(fctx1) and not f2) or
2675 (isempty(fctx1) and not f2) or
2676 # create with flags
2676 # create with flags
2677 (not f1 and flag2) or
2677 (not f1 and flag2) or
2678 # change flags
2678 # change flags
2679 (f1 and f2 and flag1 != flag2)):
2679 (f1 and f2 and flag1 != flag2)):
2680 losedatafn(f2 or f1)
2680 losedatafn(f2 or f1)
2681
2681
2682 path1 = f1 or f2
2682 path1 = f1 or f2
2683 path2 = f2 or f1
2683 path2 = f2 or f1
2684 path1 = posixpath.join(prefix, path1[len(relroot):])
2684 path1 = posixpath.join(prefix, path1[len(relroot):])
2685 path2 = posixpath.join(prefix, path2[len(relroot):])
2685 path2 = posixpath.join(prefix, path2[len(relroot):])
2686 header = []
2686 header = []
2687 if opts.git:
2687 if opts.git:
2688 header.append('diff --git %s%s %s%s' %
2688 header.append('diff --git %s%s %s%s' %
2689 (aprefix, path1, bprefix, path2))
2689 (aprefix, path1, bprefix, path2))
2690 if not f1: # added
2690 if not f1: # added
2691 header.append('new file mode %s' % gitmode[flag2])
2691 header.append('new file mode %s' % gitmode[flag2])
2692 elif not f2: # removed
2692 elif not f2: # removed
2693 header.append('deleted file mode %s' % gitmode[flag1])
2693 header.append('deleted file mode %s' % gitmode[flag1])
2694 else: # modified/copied/renamed
2694 else: # modified/copied/renamed
2695 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2695 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2696 if mode1 != mode2:
2696 if mode1 != mode2:
2697 header.append('old mode %s' % mode1)
2697 header.append('old mode %s' % mode1)
2698 header.append('new mode %s' % mode2)
2698 header.append('new mode %s' % mode2)
2699 if copyop is not None:
2699 if copyop is not None:
2700 if opts.showsimilarity:
2700 if opts.showsimilarity:
2701 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2701 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2702 header.append('similarity index %d%%' % sim)
2702 header.append('similarity index %d%%' % sim)
2703 header.append('%s from %s' % (copyop, path1))
2703 header.append('%s from %s' % (copyop, path1))
2704 header.append('%s to %s' % (copyop, path2))
2704 header.append('%s to %s' % (copyop, path2))
2705 elif revs and not repo.ui.quiet:
2705 elif revs and not repo.ui.quiet:
2706 header.append(diffline(path1, revs))
2706 header.append(diffline(path1, revs))
2707
2707
2708 # fctx.is | diffopts | what to | is fctx.data()
2708 # fctx.is | diffopts | what to | is fctx.data()
2709 # binary() | text nobinary git index | output? | outputted?
2709 # binary() | text nobinary git index | output? | outputted?
2710 # ------------------------------------|----------------------------
2710 # ------------------------------------|----------------------------
2711 # yes | no no no * | summary | no
2711 # yes | no no no * | summary | no
2712 # yes | no no yes * | base85 | yes
2712 # yes | no no yes * | base85 | yes
2713 # yes | no yes no * | summary | no
2713 # yes | no yes no * | summary | no
2714 # yes | no yes yes 0 | summary | no
2714 # yes | no yes yes 0 | summary | no
2715 # yes | no yes yes >0 | summary | semi [1]
2715 # yes | no yes yes >0 | summary | semi [1]
2716 # yes | yes * * * | text diff | yes
2716 # yes | yes * * * | text diff | yes
2717 # no | * * * * | text diff | yes
2717 # no | * * * * | text diff | yes
2718 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2718 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2719 if binary and (not opts.git or (opts.git and opts.nobinary and not
2719 if binary and (not opts.git or (opts.git and opts.nobinary and not
2720 opts.index)):
2720 opts.index)):
2721 # fast path: no binary content will be displayed, content1 and
2721 # fast path: no binary content will be displayed, content1 and
2722 # content2 are only used for equivalent test. cmp() could have a
2722 # content2 are only used for equivalent test. cmp() could have a
2723 # fast path.
2723 # fast path.
2724 if fctx1 is not None:
2724 if fctx1 is not None:
2725 content1 = b'\0'
2725 content1 = b'\0'
2726 if fctx2 is not None:
2726 if fctx2 is not None:
2727 if fctx1 is not None and not fctx1.cmp(fctx2):
2727 if fctx1 is not None and not fctx1.cmp(fctx2):
2728 content2 = b'\0' # not different
2728 content2 = b'\0' # not different
2729 else:
2729 else:
2730 content2 = b'\0\0'
2730 content2 = b'\0\0'
2731 else:
2731 else:
2732 # normal path: load contents
2732 # normal path: load contents
2733 if fctx1 is not None:
2733 if fctx1 is not None:
2734 content1 = fctx1.data()
2734 content1 = fctx1.data()
2735 if fctx2 is not None:
2735 if fctx2 is not None:
2736 content2 = fctx2.data()
2736 content2 = fctx2.data()
2737
2737
2738 if binary and opts.git and not opts.nobinary:
2738 if binary and opts.git and not opts.nobinary:
2739 text = mdiff.b85diff(content1, content2)
2739 text = mdiff.b85diff(content1, content2)
2740 if text:
2740 if text:
2741 header.append('index %s..%s' %
2741 header.append('index %s..%s' %
2742 (gitindex(content1), gitindex(content2)))
2742 (gitindex(content1), gitindex(content2)))
2743 hunks = (None, [text]),
2743 hunks = (None, [text]),
2744 else:
2744 else:
2745 if opts.git and opts.index > 0:
2745 if opts.git and opts.index > 0:
2746 flag = flag1
2746 flag = flag1
2747 if flag is None:
2747 if flag is None:
2748 flag = flag2
2748 flag = flag2
2749 header.append('index %s..%s %s' %
2749 header.append('index %s..%s %s' %
2750 (gitindex(content1)[0:opts.index],
2750 (gitindex(content1)[0:opts.index],
2751 gitindex(content2)[0:opts.index],
2751 gitindex(content2)[0:opts.index],
2752 gitmode[flag]))
2752 gitmode[flag]))
2753
2753
2754 uheaders, hunks = mdiff.unidiff(content1, date1,
2754 uheaders, hunks = mdiff.unidiff(content1, date1,
2755 content2, date2,
2755 content2, date2,
2756 path1, path2,
2756 path1, path2,
2757 binary=binary, opts=opts)
2757 binary=binary, opts=opts)
2758 header.extend(uheaders)
2758 header.extend(uheaders)
2759 yield fctx1, fctx2, header, hunks
2759 yield fctx1, fctx2, header, hunks
2760
2760
2761 def diffstatsum(stats):
2761 def diffstatsum(stats):
2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2763 for f, a, r, b in stats:
2763 for f, a, r, b in stats:
2764 maxfile = max(maxfile, encoding.colwidth(f))
2764 maxfile = max(maxfile, encoding.colwidth(f))
2765 maxtotal = max(maxtotal, a + r)
2765 maxtotal = max(maxtotal, a + r)
2766 addtotal += a
2766 addtotal += a
2767 removetotal += r
2767 removetotal += r
2768 binary = binary or b
2768 binary = binary or b
2769
2769
2770 return maxfile, maxtotal, addtotal, removetotal, binary
2770 return maxfile, maxtotal, addtotal, removetotal, binary
2771
2771
2772 def diffstatdata(lines):
2772 def diffstatdata(lines):
2773 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2773 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2774
2774
2775 results = []
2775 results = []
2776 filename, adds, removes, isbinary = None, 0, 0, False
2776 filename, adds, removes, isbinary = None, 0, 0, False
2777
2777
2778 def addresult():
2778 def addresult():
2779 if filename:
2779 if filename:
2780 results.append((filename, adds, removes, isbinary))
2780 results.append((filename, adds, removes, isbinary))
2781
2781
2782 # inheader is used to track if a line is in the
2782 # inheader is used to track if a line is in the
2783 # header portion of the diff. This helps properly account
2783 # header portion of the diff. This helps properly account
2784 # for lines that start with '--' or '++'
2784 # for lines that start with '--' or '++'
2785 inheader = False
2785 inheader = False
2786
2786
2787 for line in lines:
2787 for line in lines:
2788 if line.startswith('diff'):
2788 if line.startswith('diff'):
2789 addresult()
2789 addresult()
2790 # starting a new file diff
2790 # starting a new file diff
2791 # set numbers to 0 and reset inheader
2791 # set numbers to 0 and reset inheader
2792 inheader = True
2792 inheader = True
2793 adds, removes, isbinary = 0, 0, False
2793 adds, removes, isbinary = 0, 0, False
2794 if line.startswith('diff --git a/'):
2794 if line.startswith('diff --git a/'):
2795 filename = gitre.search(line).group(2)
2795 filename = gitre.search(line).group(2)
2796 elif line.startswith('diff -r'):
2796 elif line.startswith('diff -r'):
2797 # format: "diff -r ... -r ... filename"
2797 # format: "diff -r ... -r ... filename"
2798 filename = diffre.search(line).group(1)
2798 filename = diffre.search(line).group(1)
2799 elif line.startswith('@@'):
2799 elif line.startswith('@@'):
2800 inheader = False
2800 inheader = False
2801 elif line.startswith('+') and not inheader:
2801 elif line.startswith('+') and not inheader:
2802 adds += 1
2802 adds += 1
2803 elif line.startswith('-') and not inheader:
2803 elif line.startswith('-') and not inheader:
2804 removes += 1
2804 removes += 1
2805 elif (line.startswith('GIT binary patch') or
2805 elif (line.startswith('GIT binary patch') or
2806 line.startswith('Binary file')):
2806 line.startswith('Binary file')):
2807 isbinary = True
2807 isbinary = True
2808 elif line.startswith('rename from'):
2808 elif line.startswith('rename from'):
2809 filename = line.split()[-1]
2809 filename = line[12:]
2810 elif line.startswith('rename to'):
2810 elif line.startswith('rename to'):
2811 filename += ' => %s' % line.split()[-1]
2811 filename += ' => %s' % line[10:]
2812 addresult()
2812 addresult()
2813 return results
2813 return results
2814
2814
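# Note on the slicing above (illustrative): for a header such as
# 'rename from file with spaces', line[12:] keeps the complete filename
# 'file with spaces', and 'rename to ...' is sliced with line[10:] for the
# same reason; splitting on whitespace would keep only the last word.
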
2815 def diffstat(lines, width=80):
2815 def diffstat(lines, width=80):
2816 output = []
2816 output = []
2817 stats = diffstatdata(lines)
2817 stats = diffstatdata(lines)
2818 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2818 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2819
2819
2820 countwidth = len(str(maxtotal))
2820 countwidth = len(str(maxtotal))
2821 if hasbinary and countwidth < 3:
2821 if hasbinary and countwidth < 3:
2822 countwidth = 3
2822 countwidth = 3
2823 graphwidth = width - countwidth - maxname - 6
2823 graphwidth = width - countwidth - maxname - 6
2824 if graphwidth < 10:
2824 if graphwidth < 10:
2825 graphwidth = 10
2825 graphwidth = 10
2826
2826
2827 def scale(i):
2827 def scale(i):
2828 if maxtotal <= graphwidth:
2828 if maxtotal <= graphwidth:
2829 return i
2829 return i
2830 # If diffstat runs out of room it doesn't print anything,
2830 # If diffstat runs out of room it doesn't print anything,
2831 # which isn't very useful, so always print at least one + or -
2831 # which isn't very useful, so always print at least one + or -
2832 # if there were at least some changes.
2832 # if there were at least some changes.
2833 return max(i * graphwidth // maxtotal, int(bool(i)))
2833 return max(i * graphwidth // maxtotal, int(bool(i)))
2834
2834
2835 for filename, adds, removes, isbinary in stats:
2835 for filename, adds, removes, isbinary in stats:
2836 if isbinary:
2836 if isbinary:
2837 count = 'Bin'
2837 count = 'Bin'
2838 else:
2838 else:
2839 count = '%d' % (adds + removes)
2839 count = '%d' % (adds + removes)
2840 pluses = '+' * scale(adds)
2840 pluses = '+' * scale(adds)
2841 minuses = '-' * scale(removes)
2841 minuses = '-' * scale(removes)
2842 output.append(' %s%s | %*s %s%s\n' %
2842 output.append(' %s%s | %*s %s%s\n' %
2843 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2843 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2844 countwidth, count, pluses, minuses))
2844 countwidth, count, pluses, minuses))
2845
2845
2846 if stats:
2846 if stats:
2847 output.append(_(' %d files changed, %d insertions(+), '
2847 output.append(_(' %d files changed, %d insertions(+), '
2848 '%d deletions(-)\n')
2848 '%d deletions(-)\n')
2849 % (len(stats), totaladds, totalremoves))
2849 % (len(stats), totaladds, totalremoves))
2850
2850
2851 return ''.join(output)
2851 return ''.join(output)
2852
2852
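# Worked example of the scaling above (hedged, matching the COLUMNS=24 case
# in the test further down): width=24, countwidth=3 and maxname=1 give
# graphwidth=14; with maxtotal=213 > graphwidth,
# scale(213) = max(213 * 14 // 213, 1) = 14, hence fourteen '+' characters
# per file in that diffstat output.
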
2853 def diffstatui(*args, **kw):
2853 def diffstatui(*args, **kw):
2854 '''like diffstat(), but yields 2-tuples of (output, label) for
2854 '''like diffstat(), but yields 2-tuples of (output, label) for
2855 ui.write()
2855 ui.write()
2856 '''
2856 '''
2857
2857
2858 for line in diffstat(*args, **kw).splitlines():
2858 for line in diffstat(*args, **kw).splitlines():
2859 if line and line[-1] in '+-':
2859 if line and line[-1] in '+-':
2860 name, graph = line.rsplit(' ', 1)
2860 name, graph = line.rsplit(' ', 1)
2861 yield (name + ' ', '')
2861 yield (name + ' ', '')
2862 m = re.search(br'\++', graph)
2862 m = re.search(br'\++', graph)
2863 if m:
2863 if m:
2864 yield (m.group(0), 'diffstat.inserted')
2864 yield (m.group(0), 'diffstat.inserted')
2865 m = re.search(br'-+', graph)
2865 m = re.search(br'-+', graph)
2866 if m:
2866 if m:
2867 yield (m.group(0), 'diffstat.deleted')
2867 yield (m.group(0), 'diffstat.deleted')
2868 else:
2868 else:
2869 yield (line, '')
2869 yield (line, '')
2870 yield ('\n', '')
2870 yield ('\n', '')
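To make the (text, label) contract concrete, a small self-contained illustration of how one rendered graph line is split into labelled pieces; it uses str rather than the bytes regexes above, and the sample line and label names simply mirror the code, not any particular caller:

  import re

  line = ' a | 3 +++'                     # one already-rendered diffstat line
  pieces = []
  if line and line[-1] in '+-':
      name, graph = line.rsplit(' ', 1)
      pieces.append((name + ' ', ''))
      m = re.search(r'\++', graph)
      if m:
          pieces.append((m.group(0), 'diffstat.inserted'))
      m = re.search(r'-+', graph)
      if m:
          pieces.append((m.group(0), 'diffstat.deleted'))
  pieces.append(('\n', ''))

  assert pieces == [(' a | 3 ', ''), ('+++', 'diffstat.inserted'), ('\n', '')]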
@@ -1,257 +1,273
1 $ hg init repo
1 $ hg init repo
2 $ cd repo
2 $ cd repo
3 $ i=0; while [ "$i" -lt 213 ]; do echo a >> a; i=`expr $i + 1`; done
3 $ i=0; while [ "$i" -lt 213 ]; do echo a >> a; i=`expr $i + 1`; done
4 $ hg add a
4 $ hg add a
5 $ cp a b
5 $ cp a b
6 $ hg add b
6 $ hg add b
7
7
8 Wide diffstat:
8 Wide diffstat:
9
9
10 $ hg diff --stat
10 $ hg diff --stat
11 a | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
11 a | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
12 b | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
12 b | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 426 insertions(+), 0 deletions(-)
13 2 files changed, 426 insertions(+), 0 deletions(-)
14
14
15 diffstat width:
15 diffstat width:
16
16
17 $ COLUMNS=24 hg diff --config ui.interactive=true --stat
17 $ COLUMNS=24 hg diff --config ui.interactive=true --stat
18 a | 213 ++++++++++++++
18 a | 213 ++++++++++++++
19 b | 213 ++++++++++++++
19 b | 213 ++++++++++++++
20 2 files changed, 426 insertions(+), 0 deletions(-)
20 2 files changed, 426 insertions(+), 0 deletions(-)
21
21
22 $ hg ci -m adda
22 $ hg ci -m adda
23
23
24 $ cat >> a <<EOF
24 $ cat >> a <<EOF
25 > a
25 > a
26 > a
26 > a
27 > a
27 > a
28 > EOF
28 > EOF
29
29
30 Narrow diffstat:
30 Narrow diffstat:
31
31
32 $ hg diff --stat
32 $ hg diff --stat
33 a | 3 +++
33 a | 3 +++
34 1 files changed, 3 insertions(+), 0 deletions(-)
34 1 files changed, 3 insertions(+), 0 deletions(-)
35
35
36 $ hg ci -m appenda
36 $ hg ci -m appenda
37
37
38 >>> open("c", "wb").write(b"\0") and None
38 >>> open("c", "wb").write(b"\0") and None
39 $ touch d
39 $ touch d
40 $ hg add c d
40 $ hg add c d
41
41
42 Binary diffstat:
42 Binary diffstat:
43
43
44 $ hg diff --stat
44 $ hg diff --stat
45 c | Bin
45 c | Bin
46 1 files changed, 0 insertions(+), 0 deletions(-)
46 1 files changed, 0 insertions(+), 0 deletions(-)
47
47
48 Binary git diffstat:
48 Binary git diffstat:
49
49
50 $ hg diff --stat --git
50 $ hg diff --stat --git
51 c | Bin
51 c | Bin
52 d | 0
52 d | 0
53 2 files changed, 0 insertions(+), 0 deletions(-)
53 2 files changed, 0 insertions(+), 0 deletions(-)
54
54
55 $ hg ci -m createb
55 $ hg ci -m createb
56
56
57 >>> open("file with spaces", "wb").write(b"\0") and None
57 >>> open("file with spaces", "wb").write(b"\0") and None
58 $ hg add "file with spaces"
58 $ hg add "file with spaces"
59
59
60 Filename with spaces diffstat:
60 Filename with spaces diffstat:
61
61
62 $ hg diff --stat
62 $ hg diff --stat
63 file with spaces | Bin
63 file with spaces | Bin
64 1 files changed, 0 insertions(+), 0 deletions(-)
64 1 files changed, 0 insertions(+), 0 deletions(-)
65
65
66 Filename with spaces git diffstat:
66 Filename with spaces git diffstat:
67
67
68 $ hg diff --stat --git
68 $ hg diff --stat --git
69 file with spaces | Bin
69 file with spaces | Bin
70 1 files changed, 0 insertions(+), 0 deletions(-)
70 1 files changed, 0 insertions(+), 0 deletions(-)
71
71
72 Filename without "a/" or "b/" (issue5759):
72 Filename without "a/" or "b/" (issue5759):
73
73
74 $ hg diff --config 'diff.noprefix=1' -c1 --stat --git
74 $ hg diff --config 'diff.noprefix=1' -c1 --stat --git
75 a | 3 +++
75 a | 3 +++
76 1 files changed, 3 insertions(+), 0 deletions(-)
76 1 files changed, 3 insertions(+), 0 deletions(-)
77 $ hg diff --config 'diff.noprefix=1' -c2 --stat --git
77 $ hg diff --config 'diff.noprefix=1' -c2 --stat --git
78 c | Bin
78 c | Bin
79 d | 0
79 d | 0
80 2 files changed, 0 insertions(+), 0 deletions(-)
80 2 files changed, 0 insertions(+), 0 deletions(-)
81
81
82 $ hg log --config 'diff.noprefix=1' -r '1:' -p --stat --git
82 $ hg log --config 'diff.noprefix=1' -r '1:' -p --stat --git
83 changeset: 1:3a95b07bb77f
83 changeset: 1:3a95b07bb77f
84 user: test
84 user: test
85 date: Thu Jan 01 00:00:00 1970 +0000
85 date: Thu Jan 01 00:00:00 1970 +0000
86 summary: appenda
86 summary: appenda
87
87
88 a | 3 +++
88 a | 3 +++
89 1 files changed, 3 insertions(+), 0 deletions(-)
89 1 files changed, 3 insertions(+), 0 deletions(-)
90
90
91 diff --git a a
91 diff --git a a
92 --- a
92 --- a
93 +++ a
93 +++ a
94 @@ -211,3 +211,6 @@
94 @@ -211,3 +211,6 @@
95 a
95 a
96 a
96 a
97 a
97 a
98 +a
98 +a
99 +a
99 +a
100 +a
100 +a
101
101
102 changeset: 2:c60a6c753773
102 changeset: 2:c60a6c753773
103 tag: tip
103 tag: tip
104 user: test
104 user: test
105 date: Thu Jan 01 00:00:00 1970 +0000
105 date: Thu Jan 01 00:00:00 1970 +0000
106 summary: createb
106 summary: createb
107
107
108 c | Bin
108 c | Bin
109 d | 0
109 d | 0
110 2 files changed, 0 insertions(+), 0 deletions(-)
110 2 files changed, 0 insertions(+), 0 deletions(-)
111
111
112 diff --git c c
112 diff --git c c
113 new file mode 100644
113 new file mode 100644
114 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f76dd238ade08917e6712764a16a22005a50573d
114 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f76dd238ade08917e6712764a16a22005a50573d
115 GIT binary patch
115 GIT binary patch
116 literal 1
116 literal 1
117 Ic${MZ000310RR91
117 Ic${MZ000310RR91
118
118
119 diff --git d d
119 diff --git d d
120 new file mode 100644
120 new file mode 100644
121
121
122
122
123 diffstat within directories:
123 diffstat within directories:
124
124
125 $ hg rm -f 'file with spaces'
125 $ hg rm -f 'file with spaces'
126
126
127 $ mkdir dir1 dir2
127 $ mkdir dir1 dir2
128 $ echo new1 > dir1/new
128 $ echo new1 > dir1/new
129 $ echo new2 > dir2/new
129 $ echo new2 > dir2/new
130 $ hg add dir1/new dir2/new
130 $ hg add dir1/new dir2/new
131 $ hg diff --stat
131 $ hg diff --stat
132 dir1/new | 1 +
132 dir1/new | 1 +
133 dir2/new | 1 +
133 dir2/new | 1 +
134 2 files changed, 2 insertions(+), 0 deletions(-)
134 2 files changed, 2 insertions(+), 0 deletions(-)
135
135
136 $ hg diff --stat --root dir1
136 $ hg diff --stat --root dir1
137 new | 1 +
137 new | 1 +
138 1 files changed, 1 insertions(+), 0 deletions(-)
138 1 files changed, 1 insertions(+), 0 deletions(-)
139
139
140 $ hg diff --stat --root dir1 dir2
140 $ hg diff --stat --root dir1 dir2
141 warning: dir2 not inside relative root dir1
141 warning: dir2 not inside relative root dir1
142
142
143 $ hg diff --stat --root dir1 -I dir1/old
143 $ hg diff --stat --root dir1 -I dir1/old
144
144
145 $ cd dir1
145 $ cd dir1
146 $ hg diff --stat .
146 $ hg diff --stat .
147 dir1/new | 1 +
147 dir1/new | 1 +
148 1 files changed, 1 insertions(+), 0 deletions(-)
148 1 files changed, 1 insertions(+), 0 deletions(-)
149 $ hg diff --stat --root .
149 $ hg diff --stat --root .
150 new | 1 +
150 new | 1 +
151 1 files changed, 1 insertions(+), 0 deletions(-)
151 1 files changed, 1 insertions(+), 0 deletions(-)
152
152
153 $ hg diff --stat --root ../dir1 ../dir2
153 $ hg diff --stat --root ../dir1 ../dir2
154 warning: ../dir2 not inside relative root .
154 warning: ../dir2 not inside relative root .
155
155
156 $ hg diff --stat --root . -I old
156 $ hg diff --stat --root . -I old
157
157
158 $ cd ..
158 $ cd ..
159
159
160 Files with lines beginning with '--' or '++' should be properly counted in diffstat
160 Files with lines beginning with '--' or '++' should be properly counted in diffstat
161
161
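A hedged sketch of the counting rule these tests exercise: once a '@@' hunk header has been seen, only a line's first character decides whether it is an addition or a removal, so a '+-- ...' line is one addition and a '--- ...' content line is one removal. This is a simplified restatement of the elif chain shown at the top of this diff, not the full diffstatdata() parser:

  # simplified re-statement of the header/counting logic, not the real parser
  adds = removes = 0
  inheader = True
  for line in ['diff -r 160f7c034df6 file',
               '--- a/file', '+++ b/file',
               '@@ -1,3 +1,3 @@',
               ' line 1',
               '--- line 2, with dashes',                   # removal of a '--' line
               '+-- line 2, with dashes, changed again',    # addition of a '--' line
               ' line 3']:
      if line.startswith('@@'):
          inheader = False
      elif line.startswith('+') and not inheader:
          adds += 1
      elif line.startswith('-') and not inheader:
          removes += 1

  assert (adds, removes) == (1, 1)   # 1 insertion(+), 1 deletion(-), as in the test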
162 $ hg up -Cr tip
162 $ hg up -Cr tip
163 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 $ rm dir1/new
164 $ rm dir1/new
165 $ rm dir2/new
165 $ rm dir2/new
166 $ rm "file with spaces"
166 $ rm "file with spaces"
167 $ cat > file << EOF
167 $ cat > file << EOF
168 > line 1
168 > line 1
169 > line 2
169 > line 2
170 > line 3
170 > line 3
171 > EOF
171 > EOF
172 $ hg commit -Am file
172 $ hg commit -Am file
173 adding file
173 adding file
174
174
175 Lines added starting with '--' should count as additions
175 Lines added starting with '--' should count as additions
176 $ cat > file << EOF
176 $ cat > file << EOF
177 > line 1
177 > line 1
178 > -- line 2, with dashes
178 > -- line 2, with dashes
179 > line 3
179 > line 3
180 > EOF
180 > EOF
181
181
182 $ hg diff --root .
182 $ hg diff --root .
183 diff -r be1569354b24 file
183 diff -r be1569354b24 file
184 --- a/file Thu Jan 01 00:00:00 1970 +0000
184 --- a/file Thu Jan 01 00:00:00 1970 +0000
185 +++ b/file * (glob)
185 +++ b/file * (glob)
186 @@ -1,3 +1,3 @@
186 @@ -1,3 +1,3 @@
187 line 1
187 line 1
188 -line 2
188 -line 2
189 +-- line 2, with dashes
189 +-- line 2, with dashes
190 line 3
190 line 3
191
191
192 $ hg diff --root . --stat
192 $ hg diff --root . --stat
193 file | 2 +-
193 file | 2 +-
194 1 files changed, 1 insertions(+), 1 deletions(-)
194 1 files changed, 1 insertions(+), 1 deletions(-)
195
195
196 Lines changed starting with '--' should count as deletions
196 Lines changed starting with '--' should count as deletions
197 $ hg commit -m filev2
197 $ hg commit -m filev2
198 $ cat > file << EOF
198 $ cat > file << EOF
199 > line 1
199 > line 1
200 > -- line 2, with dashes, changed again
200 > -- line 2, with dashes, changed again
201 > line 3
201 > line 3
202 > EOF
202 > EOF
203
203
204 $ hg diff --root .
204 $ hg diff --root .
205 diff -r 160f7c034df6 file
205 diff -r 160f7c034df6 file
206 --- a/file Thu Jan 01 00:00:00 1970 +0000
206 --- a/file Thu Jan 01 00:00:00 1970 +0000
207 +++ b/file * (glob)
207 +++ b/file * (glob)
208 @@ -1,3 +1,3 @@
208 @@ -1,3 +1,3 @@
209 line 1
209 line 1
210 --- line 2, with dashes
210 --- line 2, with dashes
211 +-- line 2, with dashes, changed again
211 +-- line 2, with dashes, changed again
212 line 3
212 line 3
213
213
214 $ hg diff --root . --stat
214 $ hg diff --root . --stat
215 file | 2 +-
215 file | 2 +-
216 1 files changed, 1 insertions(+), 1 deletions(-)
216 1 files changed, 1 insertions(+), 1 deletions(-)
217
217
218 Lines changed starting with '--' should count as deletions
218 Lines changed starting with '--' should count as deletions
219 and starting with '++' should count as additions
219 and starting with '++' should count as additions
220 $ cat > file << EOF
220 $ cat > file << EOF
221 > line 1
221 > line 1
222 > ++ line 2, switched dashes to plusses
222 > ++ line 2, switched dashes to plusses
223 > line 3
223 > line 3
224 > EOF
224 > EOF
225
225
226 $ hg diff --root .
226 $ hg diff --root .
227 diff -r 160f7c034df6 file
227 diff -r 160f7c034df6 file
228 --- a/file Thu Jan 01 00:00:00 1970 +0000
228 --- a/file Thu Jan 01 00:00:00 1970 +0000
229 +++ b/file * (glob)
229 +++ b/file * (glob)
230 @@ -1,3 +1,3 @@
230 @@ -1,3 +1,3 @@
231 line 1
231 line 1
232 --- line 2, with dashes
232 --- line 2, with dashes
233 +++ line 2, switched dashes to plusses
233 +++ line 2, switched dashes to plusses
234 line 3
234 line 3
235
235
236 $ hg diff --root . --stat
236 $ hg diff --root . --stat
237 file | 2 +-
237 file | 2 +-
238 1 files changed, 1 insertions(+), 1 deletions(-)
238 1 files changed, 1 insertions(+), 1 deletions(-)
239
239
240 When a file is renamed, --git shouldn't lose the info about the old file
240 When a file is renamed, --git shouldn't lose the info about the old file
241 $ hg init issue6025
241 $ hg init issue6025
242 $ cd issue6025
242 $ cd issue6025
243 $ echo > a
243 $ echo > a
244 $ hg ci -Am 'add a'
244 $ hg ci -Am 'add a'
245 adding a
245 adding a
246 $ hg mv a b
246 $ hg mv a b
247 $ hg diff --git
247 $ hg diff --git
248 diff --git a/a b/b
248 diff --git a/a b/b
249 rename from a
249 rename from a
250 rename to b
250 rename to b
251 $ hg diff --stat
251 $ hg diff --stat
252 a | 1 -
252 a | 1 -
253 b | 1 +
253 b | 1 +
254 2 files changed, 1 insertions(+), 1 deletions(-)
254 2 files changed, 1 insertions(+), 1 deletions(-)
255 $ hg diff --stat --git
255 $ hg diff --stat --git
256 a => b | 0
256 a => b | 0
257 1 files changed, 0 insertions(+), 0 deletions(-)
257 1 files changed, 0 insertions(+), 0 deletions(-)
258 -- filenames may contain whitespace
259 $ echo > c
260 $ hg ci -Am 'add c'
261 adding c
262 $ hg mv c 'new c'
263 $ hg diff --git
264 diff --git a/c b/new c
265 rename from c
266 rename to new c
267 $ hg diff --stat
268 c | 1 -
269 new c | 1 +
270 2 files changed, 1 insertions(+), 1 deletions(-)
271 $ hg diff --stat --git
272 c => new c | 0
273 1 files changed, 0 insertions(+), 0 deletions(-)
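Feeding the rename header from this new test through the prefix-slicing rules introduced above keeps the full names; a minimal sketch (standalone str-based Python that mirrors, but does not call, diffstatdata()):

  lines = ['diff --git a/c b/new c',
           'rename from c',
           'rename to new c']
  filename = None
  for line in lines:
      if line.startswith('rename from '):
          filename = line[12:]                 # 'c'
      elif line.startswith('rename to '):
          filename += ' => %s' % line[10:]     # 'c => new c'
  assert filename == 'c => new c'              # matches the "c => new c | 0" row above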