##// END OF EJS Templates
patch: fix a str + bytes issue in an exception handler...
Matt Harbison -
r44127:eab0b738 stable
parent child Browse files
Show More
@@ -1,3217 +1,3217 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import re
17 import re
18 import shutil
18 import shutil
19 import zlib
19 import zlib
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import (
22 from .node import (
23 hex,
23 hex,
24 short,
24 short,
25 )
25 )
26 from .pycompat import open
26 from .pycompat import open
27 from . import (
27 from . import (
28 copies,
28 copies,
29 diffhelper,
29 diffhelper,
30 diffutil,
30 diffutil,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42 from .utils import (
42 from .utils import (
43 dateutil,
43 dateutil,
44 procutil,
44 procutil,
45 stringutil,
45 stringutil,
46 )
46 )
47
47
# Convenience alias for an in-memory binary file object.
stringio = util.stringio

# Matches a git extended diff header, capturing the a/ and b/ paths.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into alternating runs of tabs and non-tab characters
# (presumably for whitespace-aware diff rendering — used outside this chunk).
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# Tokenizes a line into tab runs, space runs, word characters (including
# high-bit bytes), or single other characters — word-level diff granularity.
wordsplitter = re.compile(
    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
)

# Re-exported so callers can catch patch failures from this module directly.
PatchError = error.PatchError

# public functions
def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        # Heuristic: does `line` look like an RFC-822 "Key: value" header?
        # Indented lines count as continuations when already in a header.
        if inheader and line.startswith((b' ', b'\t')):
            # continuation
            return True
        if line.startswith((b' ', b'-', b'+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(b': ', 1)
        return len(l) == 2 and b' ' not in l[0]

    def chunk(lines):
        # Re-wrap accumulated lines as one in-memory file object.
        return stringio(b''.join(lines))

    def hgsplit(stream, cur):
        # Split "# HG changeset patch" exports: a new patch starts when
        # the marker reappears after leaving the header block.
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith(b'# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox: each "From " line starts a message; the message
        # body (without the "From " line) is recursively re-split.
        for line in stream:
            if line.startswith(b'From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Defer to the email parser, yielding each part whose content
        # type could plausibly carry a patch.
        def msgfp(m):
            # Flatten a message (sub)part back into a file object.
            fp = stringio()
            g = mail.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split whenever header-looking lines reappear after a
        # non-header section.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the entire input is one plain patch.
        yield chunk(cur)

    class fiter(object):
        # Iterator adapter for objects that only expose readline().
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = [b'content-type']

    if not util.safehasattr(stream, b'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Consume lines until the input format is identified, then hand off
    # to the matching splitter; `cur` carries the lines read so far.
    for line in stream:
        cur.append(line)
        if line.startswith(b'# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith(b'From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(b':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith(b'--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
190
190
191
191
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
#
# _extract() checks every b'# <Header> <value>' line of an hg patch
# header against this table; on a match the <value> is stored under
# <data key> in the extracted-metadata dictionary.
patchheadermap = [
    (b'Date', b'date'),
    (b'Branch', b'branch'),
    (b'Node ID', b'nodeid'),
]
199
199
200
200
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. The temporary file named by
    'filename' lives only for the duration of the context.'''

    # Materialize the patch into a temporary file; _extract fills it in
    # and the file is reliably removed when the context exits.
    handle, patchpath = pycompat.mkstemp(prefix=b'hg-patch-')
    patchfile = os.fdopen(handle, r'wb')
    try:
        yield _extract(ui, fileobj, patchpath, patchfile)
    finally:
        patchfile.close()
        os.unlink(patchpath)
226
226
227
227
def _extract(ui, fileobj, tmpname, tmpfp):
    # Worker for extract(): parse fileobj (plain patch or email), write
    # the diff content to tmpfp, and return the metadata dictionary whose
    # keys include b'user', b'message', b'nodeid', b'p1'/b'p2' and, when a
    # diff was found, b'filename' (set to tmpname).

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
        br'---[ \t].*?^\+\+\+[ \t]|'
        br'\*\*\*[ \t].*?^---[ \t])',
        re.MULTILINE | re.DOTALL,
    )

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data[b'user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data[b'user']:
        # Not an email, restore parsed headers if any
        subject = (
            b'\n'.join(
                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
            )
            + b'\n'
        )

    # should try to parse msg['Date']
    parents = []

    nodeid = msg[r'X-Mercurial-Node']
    if nodeid:
        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
        ui.debug(b'Node ID: %s\n' % nodeid)

    if subject:
        if subject.startswith(b'[PATCH'):
            # strip a leading "[PATCH n/m]" style tag from the subject
            pend = subject.find(b']')
            if pend >= 0:
                subject = subject[pend + 1 :].lstrip()
        # collapse folded (multi-line) subject headers onto one line
        subject = re.sub(br'\n[ \t]+', b' ', subject)
        ui.debug(b'Subject: %s\n' % subject)
    if data[b'user']:
        ui.debug(b'From: %s\n' % data[b'user'])
    diffs_seen = 0
    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
    message = b''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug(b'Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # This part contains a diff; everything before the diff start
            # is scanned for hg patch headers / commit message text.
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug(b'found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[: m.start(0)].splitlines():
                if line.startswith(b'# HG changeset patch') and not hgpatch:
                    ui.debug(b'patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith(b'# User '):
                        data[b'user'] = line[7:]
                        ui.debug(b'From: %s\n' % data[b'user'])
                    elif line.startswith(b"# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith(b"# "):
                        # extensible headers, see patchheadermap
                        for header, key in patchheadermap:
                            prefix = b'# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix) :]
                                ui.debug(b'%s: %s\n' % (header, data[key]))
                    else:
                        hgpatchheader = False
                elif line == b'---':
                    # conventional patch/commentary separator: ignore the rest
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write(b'\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith(b'\n'):
                    tmpfp.write(b'\n')
        elif not diffs_seen and message and content_type == b'text/plain':
            # plain-text part before any diff: treat as message body
            message += b'\n' + payload

    if subject and not message.startswith(subject):
        message = b'%s\n%s' % (subject, message)
    data[b'message'] = message
    tmpfp.close()
    if parents:
        data[b'p1'] = parents.pop(0)
        if parents:
            data[b'p2'] = parents.pop(0)

    if diffs_seen:
        data[b'filename'] = tmpname

    return data
339
339
340
340
class patchmeta(object):
    """Metadata describing one patched file.

    'op' is one of ADD, DELETE, RENAME, COPY or MODIFY (the default).
    'path' is the patched file path; 'oldpath' is the source path when
    'op' is COPY or RENAME and None otherwise.  'mode' stays None until
    setmode() is called, after which it is an (islink, isexec) pair
    whose entries are truthy when the file is a symlink or executable
    respectively.  'binary' marks git binary patches.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = b'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Record only the symlink and executable bits of the octal mode.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Shallow clone carrying over every attribute.
        clone = patchmeta(self.path)
        clone.oldpath, clone.mode = self.oldpath, self.mode
        clone.op, clone.binary = self.op, self.binary
        return clone

    def _ispatchinga(self, afile):
        # Does the diff's "a/" side match this file's source?
        if afile == b'/dev/null':
            return self.op == b'ADD'
        source = self.oldpath or self.path
        return afile == b'a/' + source

    def _ispatchingb(self, bfile):
        # Does the diff's "b/" side match this file's destination?
        if bfile == b'/dev/null':
            return self.op == b'DELETE'
        return bfile == b'b/' + self.path

    def ispatching(self, afile, bfile):
        if not self._ispatchinga(afile):
            return False
        return self._ispatchingb(bfile)

    def __repr__(self):
        return r"<patchmeta %s %r>" % (self.op, self.path)
387
387
388
388
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Walk the patch line by line, accumulating one patchmeta per
    # "diff --git" section; the current section lives in `gp`.
    gitpatches = []
    gp = None
    for rawline in lr:
        line = rawline.rstrip(b' \r\n')
        if line.startswith(b'diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                gp = patchmeta(m.group(2))
        elif gp:
            if line.startswith(b'--- '):
                # hunk header reached: this file's metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith(b'rename from '):
                gp.op = b'RENAME'
                gp.oldpath = line[len(b'rename from ') :]
            elif line.startswith(b'rename to '):
                gp.path = line[len(b'rename to ') :]
            elif line.startswith(b'copy from '):
                gp.op = b'COPY'
                gp.oldpath = line[len(b'copy from ') :]
            elif line.startswith(b'copy to '):
                gp.path = line[len(b'copy to ') :]
            elif line.startswith(b'deleted file'):
                gp.op = b'DELETE'
            elif line.startswith(b'new file mode '):
                gp.op = b'ADD'
                # the last six characters are the octal file mode
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
432
432
433
433
class linereader(object):
    """Line reader over a file object with one-level-deep push-back.

    Pushed lines are replayed in FIFO order before the underlying file
    is consulted again; iteration stops at the empty bytestring.
    """

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue a line to be returned by the next readline(); None is a no-op.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # yield lines until readline() returns the EOF sentinel b''
        return iter(self.readline, b'')
453
453
454
454
class abstractbackend(object):
    """Interface for the targets a patch can be applied to.

    Concrete subclasses (filesystem, working directory, repository)
    implement the file accessors; writerej is an optional hook whose
    default implementation does nothing.
    """

    def __init__(self, ui):
        # ui object used by subclasses for warnings/config
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.

        Intentionally a no-op by default: backends that cannot persist
        reject files simply drop them.
        """

    def exists(self, fname):
        # Return whether fname exists in the target.
        raise NotImplementedError

    def close(self):
        # Finalize the backend; semantics are subclass-specific.
        raise NotImplementedError
488
488
489
489
class fsbackend(abstractbackend):
    """Patch backend writing directly to a filesystem tree via a vfs."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # Symlinks: report the link target with (islink=True, isexec=False).
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = bool(self.opener.lstat(fname).st_mode & 0o100)
        except OSError as err:
            # a missing file simply means "not executable"
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing/deleted file
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only adjust the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
            return
        self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # optionally prune directories emptied by the removal
        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        rejname = fname + b".rej"
        self.ui.warn(
            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, rejname)
        )
        rejfp = self.opener(rejname, b'w')
        rejfp.writelines(lines)
        rejfp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
540
540
541
541
class workingbackend(fsbackend):
    """fsbackend specialization that keeps the dirstate in sync.

    Tracks touched/removed/copied files while patching the working
    directory so close() can record the changes in the dirstate.
    """

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files present on disk but unknown to the dirstate
        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        touched = set(self.changed)
        for source, dest in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, source, dest)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for fname in self.removed:
            if fname not in self.repo.dirstate:
                # Deleted and no longer in the dirstate: probably marked
                # added then deleted, so marktouched() must not see it.
                touched.discard(fname)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
585
585
586
586
class filestore(object):
    """Holds patched file contents, in memory up to a size budget.

    Entries beyond maxsize bytes total spill to a temporary on-disk
    directory which close() removes.  maxsize=None means the default
    4 MiB budget; a negative maxsize keeps everything in memory.
    """

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # over budget: spill this entry to a temp directory
        if self.opener is None:
            root = pycompat.mkdtemp(prefix=b'hg-patch-')
            self.opener = vfsmod.vfs(root)
        # plain integer names avoid any filename encoding issue on disk
        diskname = b'%d' % self.created
        self.opener.write(diskname, data)
        self.created += 1
        self.files[fname] = (diskname, mode, copied)

    def getfile(self, fname):
        # Return the (data, mode, copied) triple, or (None, None, None)
        # when the entry is unknown.
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            diskname, mode, copied = self.files[fname]
            return self.opener.read(diskname), mode, copied
        return None, None, None

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
623
623
624
624
class repobackend(abstractbackend):
    """Patch backend applying changes on top of an existing changectx.

    Results are accumulated in a filestore (``store``) rather than the
    working directory; ``close()`` reports the set of touched files.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()  # files written via setfile()
        self.removed = set()  # files deleted via unlink()
        self.copied = {}  # copy destination -> copy source

    def _checkknown(self, fname):
        """Raise PatchError unless *fname* is tracked in the base context."""
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for *fname*, or (None, None)."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        fileflags = fctx.flags()
        return fctx.data(), (b'l' in fileflags, b'x' in fileflags)

    def setfile(self, fname, data, mode, copysource):
        """Stage new contents for *fname* in the store."""
        if copysource:
            self._checkknown(copysource)
        # data is None means "keep existing contents" (e.g. mode-only change)
        contents = data if data is not None else self.ctx[fname].data()
        self.store.setfile(fname, contents, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark the tracked file *fname* as removed."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return all files touched by the patch (changed or removed)."""
        return self.changed | self.removed
666
666
667
667
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff hunk range lines, e.g. "*** 1,5 ****" or "--- 1,5 ----"
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# recognized line-ending handling modes (see patchfile's eolmode argument)
eolmodes = [b'strict', b'crlf', b'lf', b'auto']
672
672
673
673
class patchfile(object):
    """Apply the hunks of one patched file through a backend.

    Loads the target file's current lines (from ``backend`` or, for
    copies/renames, from ``store``), applies hunks one at a time via
    ``apply()``, and on ``close()`` writes back the patched content
    plus a .rej file for any hunks that failed.
    """

    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the file's first line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
        self.remove = gp.op == b'DELETE'
        # copies/renames take their base content from the store,
        # everything else from the backend
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith(b'\r\n'):
                    self.eol = b'\r\n'
                elif self.lines[0].endswith(b'\n'):
                    self.eol = b'\n'
                if eolmode != b'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith(b'\r\n'):
                            l = l[:-2] + b'\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    b"(use '--prefix' to apply patch relative to the "
                    b"current directory)\n"
                )
            )

        self.hash = {}  # line -> [indexes]; built lazily by apply() for fuzzing
        self.dirty = 0
        self.offset = 0  # cumulative line-count delta from applied hunks
        self.skew = 0  # extra offset discovered by earlier fuzzy matches
        self.rej = []  # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, restoring EOLs per eolmode."""
        if self.eolmode == b'auto':
            eol = self.eol
        elif self.eolmode == b'crlf':
            eol = b'\r\n'
        else:
            eol = b'\n'

        # reinstate the original/forced EOL on lines normalized in __init__
        if self.eolmode != b'strict' and eol and eol != b'\n':
            rawlines = []
            for l in lines:
                if l and l.endswith(b'\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit "patching file ..." at most once (warning or verbose note)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _(b"patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = [b"--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != b'\n':
                    lines.append(b"\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk *h*.

        Returns 0 on a clean apply, the fuzz amount used on a fuzzy
        apply, or -1 when the hunk is rejected (it is then queued in
        ``self.rej``).  Raises PatchError on a malformed hunk.
        """
        if not h.complete():
            raise PatchError(
                _(b"bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _(b"cannot create %s: destination already exists\n")
                    % self.fname
                )
            else:
                self.ui.warn(_(b"file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # binary hunks replace the whole content (or delete the file)
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (
            self.eolmode in (b'crlf', b'lf')
            or self.eolmode == b'auto'
            and self.eol
        ):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
905
905
906
906
class header(object):
    """patch header

    Holds the raw header lines of one file's diff plus the hunks that
    follow it, and answers questions about the kind of change the
    header describes (binary, new file, special, ...).
    """

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header  # raw header lines (bytes)
        self.hunks = []  # hunks attached by the parser

    def binary(self):
        """True if any header line marks this as a binary diff."""
        return any(h.startswith(b'index ') for h in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to *fp*."""
        for h in self.header:
            if h.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                break
            if h.startswith(b'---'):
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (
                        len(self.hunks),
                        sum([max(h.added, h.removed) for h in self.hunks]),
                    )
                )
                break
            fp.write(h)

    def write(self, fp):
        """Write the raw header lines to *fp*."""
        fp.write(b''.join(self.header))

    def allhunks(self):
        """True when all hunks must be taken together (e.g. binary diff)."""
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file path(s) named by the first header line."""
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        # Must return a native str on Python 3.  The previous
        # b'<header %s>' % b' '.join(map(repr, ...)) mixed bytes and str
        # (repr() yields str) and raised TypeError whenever evaluated.
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        """True if the header creates a file (new/copy to/rename to)."""
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or any(
            self.special_re.match(h) for h in self.header
        )
984
984
985
985
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        # Trim context lines down to maxcontext, adjusting the start
        # lines by how much leading context was dropped.
        def trimcontext(lines, reverse=False):
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        return hash(
            (
                tuple(self.hunk),
                tuple(self.header.files()),
                self.fromline,
                self.proc,
            )
        )

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith(b'+')])
        rem = len([h for h in hunk if h.startswith(b'-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        hunk = [b'%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            hunk,
            self.after,
        )

    def write(self, fp):
        """Write this hunk in unified-diff form to *fp*."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        # NOTE(review): self.proc is expected to be b'' (not None) when
        # empty -- b'%s' % None would fail on Python 3; confirm callers.
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                fromlen,
                self.toline,
                tolen,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        # Must return a native str on Python 3.  The previous
        # b'<hunk %r@%d>' % ... failed there: %r is not supported in
        # bytes %-formatting (PEP 461) and __repr__ may not return bytes.
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1093
1093
1094
1094
def getmessages():
    """Return the prompt message catalog for interactive hunk selection.

    The mapping has three sections: b'multiple' (prompts when a file has
    several changes), b'single' (prompt for a lone change) and b'help'
    (promptchoice strings enumerating the valid answers).  Each section
    is keyed by operation: b'apply', b'discard', b'keep', b'record'.
    """
    return {
        b'multiple': {
            b'apply': _(b"apply change %d/%d to '%s'?"),
            b'discard': _(b"discard change %d/%d to '%s'?"),
            b'keep': _(b"keep change %d/%d to '%s'?"),
            b'record': _(b"record change %d/%d to '%s'?"),
        },
        b'single': {
            b'apply': _(b"apply this change to '%s'?"),
            b'discard': _(b"discard this change to '%s'?"),
            b'keep': _(b"keep this change to '%s'?"),
            b'record': _(b"record this change to '%s'?"),
        },
        b'help': {
            b'apply': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, apply this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Apply remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Apply &all changes to all remaining files'
                b'$$ &Quit, applying no changes'
                b'$$ &? (display help)'
            ),
            b'discard': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, discard this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Discard remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Discard &all changes to all remaining files'
                b'$$ &Quit, discarding no changes'
                b'$$ &? (display help)'
            ),
            b'keep': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, keep this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Keep remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Keep &all changes to all remaining files'
                b'$$ &Quit, keeping all changes'
                b'$$ &? (display help)'
            ),
            b'record': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, record this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Record remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Record &all changes to all remaining files'
                b'$$ &Quit, recording no changes'
                b'$$ &? (display help)'
            ),
        },
    }
1160
1160
1161
1161
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks.

    Walks every header in ``headers``, shows each hunk to the user and
    collects the accepted ones.  Returns a 2-tuple of (list of kept
    headers/hunks, replacement info dict).  ``operation`` selects the
    prompt wording (defaults to b'record').
    """
    messages = getmessages()

    if operation is None:
        operation = b'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # sticky answers short-circuit the prompt entirely
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages[b'help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
            ui.write(b"\n")
            if r == 8:  # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0:  # yes
                ret = True
            elif r == 1:  # no
                ret = False
            elif r == 2:  # Edit patch
                if chunk is None:
                    ui.write(_(b'cannot edit patch for whole file'))
                    ui.write(b"\n")
                    continue
                if chunk.header.binary():
                    ui.write(_(b'cannot edit patch for binary file'))
                    ui.write(b"\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = b'---' + _(
                    b"""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = pycompat.mkstemp(
                    prefix=b"hg-editor-", suffix=b".diff"
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write(
                        b''.join(
                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
                        )
                    )
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        b"%s \"%s\"" % (editor, patchfn),
                        environ={b'HGUSER': ui.username()},
                        blockedtag=b'filterpatch',
                    )
                    if ret != 0:
                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith(b'#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3:  # Skip
                ret = skipfile = False
            elif r == 4:  # file (Record remaining)
                ret = skipfile = True
            elif r == 5:  # done, skip remaining
                ret = skipall = False
            elif r == 6:  # all
                ret = skipall = True
            elif r == 7:  # quit
                raise error.Abort(_(b'user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = b''.join(h.header)
        if hdr in seen:
            # duplicate header (e.g. same file appearing twice) - keep first
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = _(b'examine changes to %s?') % _(b' and ').join(
            b"'%s'" % f for f in files
        )
        if all(match.exact(f) for f in files):
            # files named explicitly on the command line are always examined
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages[b'single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages[b'multiple'][operation] % (
                    idx,
                    total,
                    chunk.filename(),
                )
            r, skipfile, skipall, newpatches = prompt(
                skipfile, skipall, msg, chunk
            )
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target line numbers
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (
        sum(
            [
                h
                for h in pycompat.itervalues(applied)
                if h[0].special() or len(h) > 1
            ],
            [],
        ),
        {},
    )
1342
1342
1343
1343
class hunk(object):
    """A single hunk of a text patch (unified or context format).

    Parses itself from a linereader ``lr`` when one is given; ``context``
    selects context-diff parsing over unified parsing.  ``self.a`` holds
    the old-side lines, ``self.b`` the new-side lines, ``self.hunk`` the
    raw hunk text including the ``@@``/``***`` description line.
    """

    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith(b'\r\n'):
                    line = line[:-2] + b'\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk (``@@ -a,la +b,lb @@``) from ``lr``."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(
                lr, self.hunk, self.lena, self.lenb, self.a, self.b
            )
        except error.ParseError as e:
            # The exception must be stringified: %-formatting a bytes
            # pattern with an exception *object* raises TypeError on py3.
            raise PatchError(
                _(b"bad hunk #%d: %s")
                % (self.number, stringutil.forcebytestr(e))
            )
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk (``*** a,b ****`` style) from ``lr``."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith(b'---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'- ') or l.startswith(b'! '):
                u = b'-' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file' - strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'+ ') or l.startswith(b'! '):
                u = b'+' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = b""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith(b'-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith(b'-') or x.startswith(b' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith(b'+') or x.startswith(b' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when the parsed line counts match the declared ranges."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(b' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(b' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz``
        context lines trimmed from the edges (top only if ``toponly``)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1570
1570
1571
1571
class binhunk(object):
    """A binary patch file (git 'literal'/'delta' base85 payload)."""

    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = [b'GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # _read() only sets self.text after a fully decoded payload
        return self.text is not None

    def new(self, lines):
        """Return the new file content; applies the delta when needed."""
        if self.delta:
            return [applybindelta(self.text, b''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip(b'\r\n')

        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(
                    _(b'could not extract "%s" binary data') % self._fname
                )
            if line.startswith(b'literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith(b'delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first byte encodes the decoded length of the line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0:1]
            if l <= b'Z' and l >= b'A':
                l = ord(l) - ord(b'A') + 1
            else:
                l = ord(l) - ord(b'a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(
                    _(b'could not decode "%s" binary patch: %s')
                    % (self._fname, stringutil.forcebytestr(e))
                )
            line = getline(lr, self.hunk)
        text = zlib.decompress(b''.join(dec))
        if len(text) != size:
            raise PatchError(
                _(b'"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text
1632
1632
1633
1633
def parsefilename(str):
    """Return the file name from a ``--- <name>`` / ``+++ <name>`` line.

    The leading 4-character marker is stripped, as is anything after the
    first tab (or, failing that, the first space) - typically a timestamp.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip(b'\r\n')
    i = s.find(b'\t')
    if i < 0:
        i = s.find(b' ')
        if i < 0:
            return s
    return s[:i]
1643
1643
1644
1644
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        # headers have no reversehunk(); they pass through unchanged
        if util.safehasattr(c, b'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks
1707
1707
1708
1708
1709 def parsepatch(originalchunks, maxcontext=None):
1709 def parsepatch(originalchunks, maxcontext=None):
1710 """patch -> [] of headers -> [] of hunks
1710 """patch -> [] of headers -> [] of hunks
1711
1711
1712 If maxcontext is not None, trim context lines if necessary.
1712 If maxcontext is not None, trim context lines if necessary.
1713
1713
1714 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1714 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1715 ... --- a/folder1/g
1715 ... --- a/folder1/g
1716 ... +++ b/folder1/g
1716 ... +++ b/folder1/g
1717 ... @@ -1,8 +1,10 @@
1717 ... @@ -1,8 +1,10 @@
1718 ... 1
1718 ... 1
1719 ... 2
1719 ... 2
1720 ... -3
1720 ... -3
1721 ... 4
1721 ... 4
1722 ... 5
1722 ... 5
1723 ... 6
1723 ... 6
1724 ... +6.1
1724 ... +6.1
1725 ... +6.2
1725 ... +6.2
1726 ... 7
1726 ... 7
1727 ... 8
1727 ... 8
1728 ... +9'''
1728 ... +9'''
1729 >>> out = util.stringio()
1729 >>> out = util.stringio()
1730 >>> headers = parsepatch([rawpatch], maxcontext=1)
1730 >>> headers = parsepatch([rawpatch], maxcontext=1)
1731 >>> for header in headers:
1731 >>> for header in headers:
1732 ... header.write(out)
1732 ... header.write(out)
1733 ... for hunk in header.hunks:
1733 ... for hunk in header.hunks:
1734 ... hunk.write(out)
1734 ... hunk.write(out)
1735 >>> print(pycompat.sysstr(out.getvalue()))
1735 >>> print(pycompat.sysstr(out.getvalue()))
1736 diff --git a/folder1/g b/folder1/g
1736 diff --git a/folder1/g b/folder1/g
1737 --- a/folder1/g
1737 --- a/folder1/g
1738 +++ b/folder1/g
1738 +++ b/folder1/g
1739 @@ -2,3 +2,2 @@
1739 @@ -2,3 +2,2 @@
1740 2
1740 2
1741 -3
1741 -3
1742 4
1742 4
1743 @@ -6,2 +5,4 @@
1743 @@ -6,2 +5,4 @@
1744 6
1744 6
1745 +6.1
1745 +6.1
1746 +6.2
1746 +6.2
1747 7
1747 7
1748 @@ -8,1 +9,2 @@
1748 @@ -8,1 +9,2 @@
1749 8
1749 8
1750 +9
1750 +9
1751 """
1751 """
1752
1752
1753 class parser(object):
1753 class parser(object):
1754 """patch parsing state machine"""
1754 """patch parsing state machine"""
1755
1755
1756 def __init__(self):
1756 def __init__(self):
1757 self.fromline = 0
1757 self.fromline = 0
1758 self.toline = 0
1758 self.toline = 0
1759 self.proc = b''
1759 self.proc = b''
1760 self.header = None
1760 self.header = None
1761 self.context = []
1761 self.context = []
1762 self.before = []
1762 self.before = []
1763 self.hunk = []
1763 self.hunk = []
1764 self.headers = []
1764 self.headers = []
1765
1765
1766 def addrange(self, limits):
1766 def addrange(self, limits):
1767 self.addcontext([])
1767 self.addcontext([])
1768 fromstart, fromend, tostart, toend, proc = limits
1768 fromstart, fromend, tostart, toend, proc = limits
1769 self.fromline = int(fromstart)
1769 self.fromline = int(fromstart)
1770 self.toline = int(tostart)
1770 self.toline = int(tostart)
1771 self.proc = proc
1771 self.proc = proc
1772
1772
1773 def addcontext(self, context):
1773 def addcontext(self, context):
1774 if self.hunk:
1774 if self.hunk:
1775 h = recordhunk(
1775 h = recordhunk(
1776 self.header,
1776 self.header,
1777 self.fromline,
1777 self.fromline,
1778 self.toline,
1778 self.toline,
1779 self.proc,
1779 self.proc,
1780 self.before,
1780 self.before,
1781 self.hunk,
1781 self.hunk,
1782 context,
1782 context,
1783 maxcontext,
1783 maxcontext,
1784 )
1784 )
1785 self.header.hunks.append(h)
1785 self.header.hunks.append(h)
1786 self.fromline += len(self.before) + h.removed
1786 self.fromline += len(self.before) + h.removed
1787 self.toline += len(self.before) + h.added
1787 self.toline += len(self.before) + h.added
1788 self.before = []
1788 self.before = []
1789 self.hunk = []
1789 self.hunk = []
1790 self.context = context
1790 self.context = context
1791
1791
1792 def addhunk(self, hunk):
1792 def addhunk(self, hunk):
1793 if self.context:
1793 if self.context:
1794 self.before = self.context
1794 self.before = self.context
1795 self.context = []
1795 self.context = []
1796 if self.hunk:
1796 if self.hunk:
1797 self.addcontext([])
1797 self.addcontext([])
1798 self.hunk = hunk
1798 self.hunk = hunk
1799
1799
1800 def newfile(self, hdr):
1800 def newfile(self, hdr):
1801 self.addcontext([])
1801 self.addcontext([])
1802 h = header(hdr)
1802 h = header(hdr)
1803 self.headers.append(h)
1803 self.headers.append(h)
1804 self.header = h
1804 self.header = h
1805
1805
1806 def addother(self, line):
1806 def addother(self, line):
1807 pass # 'other' lines are ignored
1807 pass # 'other' lines are ignored
1808
1808
1809 def finished(self):
1809 def finished(self):
1810 self.addcontext([])
1810 self.addcontext([])
1811 return self.headers
1811 return self.headers
1812
1812
1813 transitions = {
1813 transitions = {
1814 b'file': {
1814 b'file': {
1815 b'context': addcontext,
1815 b'context': addcontext,
1816 b'file': newfile,
1816 b'file': newfile,
1817 b'hunk': addhunk,
1817 b'hunk': addhunk,
1818 b'range': addrange,
1818 b'range': addrange,
1819 },
1819 },
1820 b'context': {
1820 b'context': {
1821 b'file': newfile,
1821 b'file': newfile,
1822 b'hunk': addhunk,
1822 b'hunk': addhunk,
1823 b'range': addrange,
1823 b'range': addrange,
1824 b'other': addother,
1824 b'other': addother,
1825 },
1825 },
1826 b'hunk': {
1826 b'hunk': {
1827 b'context': addcontext,
1827 b'context': addcontext,
1828 b'file': newfile,
1828 b'file': newfile,
1829 b'range': addrange,
1829 b'range': addrange,
1830 },
1830 },
1831 b'range': {b'context': addcontext, b'hunk': addhunk},
1831 b'range': {b'context': addcontext, b'hunk': addhunk},
1832 b'other': {b'other': addother},
1832 b'other': {b'other': addother},
1833 }
1833 }
1834
1834
1835 p = parser()
1835 p = parser()
1836 fp = stringio()
1836 fp = stringio()
1837 fp.write(b''.join(originalchunks))
1837 fp.write(b''.join(originalchunks))
1838 fp.seek(0)
1838 fp.seek(0)
1839
1839
1840 state = b'context'
1840 state = b'context'
1841 for newstate, data in scanpatch(fp):
1841 for newstate, data in scanpatch(fp):
1842 try:
1842 try:
1843 p.transitions[state][newstate](p, data)
1843 p.transitions[state][newstate](p, data)
1844 except KeyError:
1844 except KeyError:
1845 raise PatchError(
1845 raise PatchError(
1846 b'unhandled transition: %s -> %s' % (state, newstate)
1846 b'unhandled transition: %s -> %s' % (state, newstate)
1847 )
1847 )
1848 state = newstate
1848 state = newstate
1849 del fp
1849 del fp
1850 return p.finished()
1850 return p.finished()
1851
1851
1852
1852
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        # nothing to strip; only trim trailing whitespace (diff padding)
        return b'', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find(b'/', i)
        if i == -1:
            # fewer components than the caller asked to strip
            raise PatchError(
                _(b"unable to strip away %d of %d dirs from %s")
                % (count, strip, path)
            )
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i : i + 1] == b'/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
1892
1892
1893
1893
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta record for a plain (non-git) patch hunk.

    Decides which repository file the hunk targets, using the ---/+++
    paths, /dev/null markers, and which of the candidates exist in
    'backend'.  Returns a patchmeta with op set to ADD/DELETE when the
    hunk clearly creates or removes the file.  Raises PatchError when
    neither a source nor a destination file can be determined.
    """
    nulla = afile_orig == b"/dev/null"
    nullb = bfile_orig == b"/dev/null"
    # a 0,0 range against /dev/null marks file creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[: afile.rfind(b'/') + 1]
    bbasedir = bfile[: bfile.rfind(b'/') + 1]
    if (
        missing
        and abasedir == bbasedir
        and afile.startswith(bfile)
        and hunk.starta == 0
        and hunk.lena == 0
    ):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = abase == bbase and bfile.startswith(afile)
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch's own claims when neither side exists
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_(b"undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = b'ADD'
    elif remove:
        gp.op = b'DELETE'
    return gp
1953
1953
1954
1954
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, b''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push back for the caller's next read
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, b''):
        if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):

            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in (b'---', b'diff')

            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith(b'---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield b'file', header
        elif line.startswith(b' '):
            # ' ' context line; '\' covers "\ No newline at end of file"
            cs = (b' ', b'\\')
            yield b'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith((b'-', b'+')):
            cs = (b'-', b'+', b'\\')
            yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield b'range', m.groups()
            else:
                yield b'other', line
2004
2004
2005
2005
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer the rest in memory so
        # we can rewind after the metadata scan
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the normal hunk parsing re-reads the same data
    fp.seek(pos)
    return gitpatches
2031
2031
2032
2032
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = b""
    bfile = b""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None  # pending git metadata records, reversed (pop order)

    # our states
    BFILE = 1
    # context: None = unknown yet, True = context diff, False = unified
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, b''):
        if state == BFILE and (
            (not context and x.startswith(b'@'))
            or (context is not False and x.startswith(b'***************'))
            or x.startswith(b'GIT binary patch')
        ):
            gp = None
            if gitpatches and gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
            if x.startswith(b'GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith(b'***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before the hunk
                emitfile = False
                yield b'file', (afile, bfile, h, gp and gp.copy() or None)
            yield b'hunk', h
        elif x.startswith(b'diff --git a/'):
            m = gitre.match(x.rstrip(b' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield b'git', [
                    g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
                ]
                gitpatches.reverse()
            afile = b'a/' + m.group(1)
            bfile = b'b/' + m.group(2)
            # emit hunkless file events for metadata-only changes that
            # precede the current file in the scanned git records
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield b'file', (
                    b'a/' + gp.path,
                    b'b/' + gp.path,
                    None,
                    gp.copy(),
                )
            if not gitpatches:
                raise PatchError(
                    _(b'failed to synchronize metadata for "%s"') % afile[2:]
                )
            newfile = True
        elif x.startswith(b'---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith(b'+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith(b'***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith(b'---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith(b"***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining metadata-only git records (e.g. mode changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2134
2134
2135
2135
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    'binchunk' starts with two variable-length size headers (source and
    target size), followed by a stream of commands: a command byte with
    the high bit set copies a range out of 'data' (offset/size encoded
    in the following bytes, selected by the low command bits); a nonzero
    command byte without the high bit inlines that many literal bytes.
    A zero command byte is reserved and rejected.
    """

    def deltahead(binchunk):
        # length of the little-endian base-128 size header: the last
        # header byte is the first one with its high bit clear
        i = 0
        for j in range(len(binchunk)):
            i += 1
            if not (ord(binchunk[j : j + 1]) & 0x80):
                return i
        return i

    out = b""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip target size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i : i + 1])
        i += 1
        if cmd & 0x80:
            # copy command: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if cmd & 0x01:
                offset = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x02:
                offset |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x04:
                offset |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if cmd & 0x08:
                offset |= ord(binchunk[i : i + 1]) << 24
                i += 1
            if cmd & 0x10:
                size = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x20:
                size |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x40:
                size |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if size == 0:
                size = 0x10000  # git encodes 65536 as 0
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal insert of the next 'cmd' bytes
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_(b'unexpected delta opcode 0'))
    return out
2193
2193
2194
2194
def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper binding the default 'patchfile' patcher class
    return _applydiff(
        ui,
        fp,
        patchfile,
        backend,
        store,
        strip=strip,
        prefix=prefix,
        eolmode=eolmode,
    )
2215
2215
2216
2216
def _canonprefix(repo, prefix):
    """Canonicalize a user-supplied target prefix against the repo root.

    Returns the repo-relative prefix with a trailing '/', or b'' when no
    prefix was given (the form pathtransform() expects).
    """
    if prefix:
        prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
        if prefix != b'':
            prefix += b'/'
    return prefix
2223
2223
2224
2224
def _applydiff(
    ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
):
    """Drive iterhunks() events and apply them via 'patcher'/'backend'.

    Returns 0 on clean application, -1 when any hunk was rejected and 1
    when any hunk applied with fuzz.  Raises PatchError for structural
    problems (missing copy sources, create-over-existing, ...).
    """
    prefix = _canonprefix(backend.repo, prefix)

    def pstrip(p):
        # git metadata paths always carry the a/ or b/ component, so
        # strip one level less than for hunk paths
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == b'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == b'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(
                    backend, afile, bfile, first_hunk, strip, prefix
                )
            if gp.op == b'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only event: creation, deletion, copy, rename
                # or mode change without content hunks
                if gp.op == b'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in (b'RENAME', b'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(
                            _(b"source file '%s' does not exist") % gp.oldpath
                        )
                if gp.mode:
                    mode = gp.mode
                if gp.op == b'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = b''
                if data or mode:
                    if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
                        gp.path
                    ):
                        raise PatchError(
                            _(
                                b"cannot create %s: destination "
                                b"already exists"
                            )
                            % gp.path
                        )
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
            except PatchError as inst:
                # the exception message is a str on py3; force it to
                # bytes before concatenating with b'\n' for ui.warn()
                ui.warn(stringutil.forcebytestr(inst) + b'\n')
                current_file = None
                rejects += 1
                continue
        elif state == b'git':
            # preload copy/rename sources so later hunks can read them
            # even after the originals are modified or removed
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_(b'unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2318
2318
2319
2319
def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append(b'-d %s' % procutil.shellquote(cwd))
    cmd = b'%s %s -p%d < %s' % (
        patcher,
        b' '.join(args),
        strip,
        procutil.shellquote(patchname),
    )
    ui.debug(b'Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, b'rb')
    try:
        # scrape the external tool's output for touched files, fuzz and
        # failure notices
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + b'\n')
            if line.startswith(b'patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find(b'with fuzz') >= 0:
                # NOTE(review): pf/printed_file are only bound once a
                # 'patching file' line has been seen; assumes the tool
                # always emits that line first — confirm for exotic tools
                fuzz = True
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
            elif line.find(b'saving rejects to file') >= 0:
                ui.warn(line + b'\n')
            elif line.find(b'FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(
            _(b"patch command failed: %s") % procutil.explainexit(code)
        )
    return fuzz
2367
2367
2368
2368
def patchbackend(
    ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply the patch in ``patchobj`` through ``backend``.

    ``patchobj`` may be a file name or an already-open file object.  Names
    of touched files are accumulated into the ``files`` set.  Returns True
    when the patch applied with fuzz, False for a clean application, and
    raises PatchError when the patch failed to apply.
    """
    files = set() if files is None else files
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config(b'patch', b'eol')
        if eolmode.lower() not in eolmodes:
            raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
        eolmode = eolmode.lower()

    store = filestore()
    try:
        patchfile = open(patchobj, b'rb')
    except TypeError:
        # not a path: assume ``patchobj`` is already a file-like object
        patchfile = patchobj
    try:
        result = applydiff(
            ui,
            patchfile,
            backend,
            store,
            strip=strip,
            prefix=prefix,
            eolmode=eolmode,
        )
    finally:
        if patchfile != patchobj:
            patchfile.close()
        files.update(backend.close())
        store.close()
    if result < 0:
        raise PatchError(_(b'patch failed to apply'))
    return result > 0
2397
2397
2398
2398
def internalpatch(
    ui,
    repo,
    patchobj,
    strip,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, prefix, files, eolmode)
2413
2413
2414
2414
def patchrepo(
    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply <patchobj> against context ``ctx`` via a repobackend built on
    ``store``.  Returns whether the patch was applied with fuzz factor."""
    rbackend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rbackend, patchobj, strip, prefix, files, eolmode)
2420
2420
2421
2421
def patch(
    ui,
    repo,
    patchname,
    strip=1,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an external tool configured via ui.patch takes precedence over the
    # builtin patcher
    externaltool = ui.config(b'ui', b'patch')
    if externaltool:
        return _externalpatch(
            ui, repo, externaltool, patchname, strip, files, similarity
        )
    return internalpatch(
        ui, repo, patchname, strip, prefix, files, eolmode, similarity
    )
2453
2453
2454
2454
def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
    """Return the set of paths touched by the patch at ``patchpath``.

    The patch is only parsed, not applied.  ``strip`` and ``prefix`` are
    applied to the paths found in the patch.  Aborts on an unsupported
    parser state.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, b'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == b'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git metadata carries its own paths; note the
                    # strip - 1 here, presumably because one path
                    # component was already consumed upstream — TODO confirm
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(
                            gp.oldpath, strip - 1, prefix
                        )[1]
                else:
                    # plain patch: synthesize the metadata from the hunk
                    gp = makepatchmeta(
                        backend, afile, bfile, first_hunk, strip, prefix
                    )
                changed.add(gp.path)
                if gp.op == b'RENAME':
                    # a rename touches its source path as well
                    changed.add(gp.oldpath)
            elif state not in (b'hunk', b'git'):
                raise error.Abort(_(b'unsupported parser state: %s') % state)
        return changed
2479
2479
2480
2480
class GitDiffRequired(Exception):
    """Internal signal that plain-diff output would lose data and the diff
    must be regenerated in git format (raised and caught in diffhunks())."""

    pass
2483
2483
2484
2484
# Re-export the diff-options factories from diffutil under this module's
# historical names, so existing callers of patch.diffopts & co keep working.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2488
2488
2489
2489
def diff(
    repo,
    node1=None,
    node2=None,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
    hunksfilterfn=None,
):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    if copysourcematch is not None, then copy sources will be filtered by this
    matcher

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    if not node1 and not node2:
        # default to diffing the working directory against its first parent
        node1 = repo.dirstate.p1()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    for fctx1, fctx2, hdr, hunks in diffhunks(
        repo,
        ctx1=ctx1,
        ctx2=ctx2,
        match=match,
        changes=changes,
        opts=opts,
        losedatafn=losedatafn,
        pathfn=pathfn,
        copy=copy,
        copysourcematch=copysourcematch,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert (
                fctx2 is not None
            ), b'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into one byte string
        text = b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header unless it is a bare one-line header with no hunks
        if hdr and (text or len(hdr) > 1):
            yield b'\n'.join(hdr) + b'\n'
        if text:
            yield text
2563
2563
2564
2564
def diffhunks(
    repo,
    ctx1,
    ctx2,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # build a getfilectx with a small LRU cache (20 entries) of filelogs,
    # so repeatedly diffed files do not reopen their filelog each time
    def lrugetfilectx():
        cache = {}
        order = collections.deque()

        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used filelog
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx

        return getfilectx

    getfilectx = lrugetfilectx()

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        # nothing changed: no hunks to produce
        return []

    # node ids shown in diff headers: full hashes in debug mode
    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if copysourcematch:
        # filter out copies where source side isn't inside the matcher
        # (copies.pathcopies() already filtered out the destination)
        copy = {
            dst: src
            for dst, src in pycompat.iteritems(copy)
            if copysourcematch(src)
        }

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    # prefetch the contents of every file the diff will read, for both
    # revisions, before producing any output
    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset)
    )
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(
            repo,
            revs,
            ctx1,
            ctx2,
            modified,
            added,
            removed,
            copy,
            getfilectx,
            opts,
            losedata,
            pathfn,
        )

    if opts.upgrade and not opts.git:
        # try a plain diff first; fall back to git format if data would
        # be lost and losedatafn does not accept the loss
        try:

            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired

            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2690
2690
2691
2691
def diffsinglehunk(hunklines):
    """Yield (text, label) tokens for the lines of a single hunk."""
    for rawline in hunklines:
        # peel off the EOL, then any trailing whitespace before it
        noeol = rawline.rstrip(b'\r\n')
        body = noeol.rstrip()
        # pick the color label from the hunk line prefix
        firstbyte = rawline[0:1]
        if firstbyte == b'-':
            label = b'diff.deleted'
        elif firstbyte == b'+':
            label = b'diff.inserted'
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % rawline)
        # tabs get their own label so they can be highlighted
        for token in tabsplitter.findall(body):
            if token.startswith(b'\t'):
                yield (token, b'diff.tab')
            else:
                yield (token, label)
        trailing = noeol[len(body) :]
        if trailing:
            yield (trailing, b'diff.trailingwhitespace')
        eol = rawline[len(noeol) :]
        if eol:
            yield (eol, b'')
2715
2715
2716
2716
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content
    # a collects all deleted text, b all inserted text (prefix stripped)
    a = b''
    b = b''
    for line in hunklines:
        if line[0:1] == b'-':
            a += line[1:]
        elif line[0:1] == b'+':
            b += line[1:]
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == b'\n' else s + b'\n' for s in al]
    bln = [s if s == b'\n' else s + b'\n' for s in bl]
    an = b''.join(aln)
    bn = b''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    # each token is tagged with whether its block changed ('!' block type)
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == b'!'
        for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [
        (b'-', b'diff.deleted', atokens),
        (b'+', b'diff.inserted', btokens),
    ]:
        # emit the '-'/'+' prefix token at the start of each output line
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith(b'\n')
            if isendofline:
                chomp = token[:-1]  # chomp
                if chomp.endswith(b'\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp) :]
                token = chomp.rstrip()  # detect spaces at the end
                endspaces = chomp[len(token) :]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = b'diff.tab'
                else:
                    # changed words get a '.changed' sub-label for inline
                    # highlighting, unchanged words '.unchanged'
                    if changed:
                        currentlabel = label + b'.changed'
                    else:
                        currentlabel = label + b'.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, b'diff.trailingwhitespace')
                yield (endofline, b'')
                nextisnewline = True
2787
2787
2788
2788
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # choose the hunk tokenizer: inline word-diff coloring when requested
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [
        (b'diff', b'diff.diffline'),
        (b'copy', b'diff.extended'),
        (b'rename', b'diff.extended'),
        (b'old', b'diff.extended'),
        (b'new', b'diff.extended'),
        (b'deleted', b'diff.extended'),
        (b'index', b'diff.extended'),
        (b'similarity', b'diff.extended'),
        (b'---', b'diff.file_a'),
        (b'+++', b'diff.file_b'),
    ]
    textprefixes = [
        (b'@', b'diff.hunk'),
        # - and + are handled by diffsinglehunk
    ]
    # head is True while we are inside a file header (before the first '@')
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []

    def consumehunkbuffer():
        # tokenize and flush any buffered hunk lines
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split(b'\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            if head:
                if line.startswith(b'@'):
                    head = False
            else:
                # any line that is not hunk content re-enters header mode
                if line and not line.startswith(
                    (b' ', b'+', b'-', b'@', b'\\')
                ):
                    head = True
            diffline = False
            if not head and line and line.startswith((b'+', b'-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    # only lines followed by another line keep their newline
                    bufferedline += b"\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (
                                line[len(stripline) :],
                                b'diff.trailingwhitespace',
                            )
                        break
                else:
                    # no known prefix: emit unlabeled
                    yield (line, b'')
                if i + 1 < linecount:
                    yield (b'\n', b'')
    # flush whatever hunk is still buffered at end of input
    for token in consumehunkbuffer():
        yield token
2867
2867
2868
2868
def diffui(*args, **kw):
    """Run diff() and decorate its output: yields (output, label) 2-tuples
    suitable for ui.write()."""
    return difflabel(diff, *args, **kw)
2872
2872
2873
2873
2874 def _filepairs(modified, added, removed, copy, opts):
2874 def _filepairs(modified, added, removed, copy, opts):
2875 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2875 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2876 before and f2 is the the name after. For added files, f1 will be None,
2876 before and f2 is the the name after. For added files, f1 will be None,
2877 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2877 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2878 or 'rename' (the latter two only if opts.git is set).'''
2878 or 'rename' (the latter two only if opts.git is set).'''
2879 gone = set()
2879 gone = set()
2880
2880
2881 copyto = dict([(v, k) for k, v in copy.items()])
2881 copyto = dict([(v, k) for k, v in copy.items()])
2882
2882
2883 addedset, removedset = set(added), set(removed)
2883 addedset, removedset = set(added), set(removed)
2884
2884
2885 for f in sorted(modified + added + removed):
2885 for f in sorted(modified + added + removed):
2886 copyop = None
2886 copyop = None
2887 f1, f2 = f, f
2887 f1, f2 = f, f
2888 if f in addedset:
2888 if f in addedset:
2889 f1 = None
2889 f1 = None
2890 if f in copy:
2890 if f in copy:
2891 if opts.git:
2891 if opts.git:
2892 f1 = copy[f]
2892 f1 = copy[f]
2893 if f1 in removedset and f1 not in gone:
2893 if f1 in removedset and f1 not in gone:
2894 copyop = b'rename'
2894 copyop = b'rename'
2895 gone.add(f1)
2895 gone.add(f1)
2896 else:
2896 else:
2897 copyop = b'copy'
2897 copyop = b'copy'
2898 elif f in removedset:
2898 elif f in removedset:
2899 f2 = None
2899 f2 = None
2900 if opts.git:
2900 if opts.git:
2901 # have we already reported a copy above?
2901 # have we already reported a copy above?
2902 if (
2902 if (
2903 f in copyto
2903 f in copyto
2904 and copyto[f] in addedset
2904 and copyto[f] in addedset
2905 and copy[copyto[f]] == f
2905 and copy[copyto[f]] == f
2906 ):
2906 ):
2907 continue
2907 continue
2908 yield f1, f2, copyop
2908 yield f1, f2, copyop
2909
2909
2910
2910
def trydiff(
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    opts,
    losedatafn,
    pathfn,
):
    '''given input data, generate a diff and yield it in blocks

    Yields (fctx1, fctx2, header, hunks) tuples, one per changed file,
    where header is a list of header lines (bytes) and hunks is an
    iterable of hunk blocks produced by mdiff.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    pathfn is applied to every path in the diff output.
    '''

    def gitindex(text):
        # SHA-1 of the git "blob" object for `text` (git's object hash:
        # b'blob <len>\0' + content), used to build "index" header lines.
        if not text:
            text = b""
        l = len(text)
        s = hashlib.sha1(b'blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = b''
    else:
        aprefix = b'a/'
        bprefix = b'b/'

    def diffline(f, revs):
        # plain (non-git) header: "diff -r REV1 -r REV2 path"
        revinfo = b' '.join([b"-r %s" % rev for rev in revs])
        return b'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        # a missing file context is treated the same as an empty file
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    # flag character -> git file mode string
    gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}

    if not pathfn:
        pathfn = lambda f: f

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        # f1 is None for added files, f2 is None for removed files
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # a plain (non-git) diff cannot express any of the following;
            # give the caller a chance to abort or warn
            if (
                binary
                or
                # copy/rename
                f2 in copy
                or
                # empty file creation
                (not f1 and isempty(fctx2))
                or
                # empty file deletion
                (isempty(fctx1) and not f2)
                or
                # create with flags
                (not f1 and flag2)
                or
                # change flags
                (f1 and f2 and flag1 != flag2)
            ):
                losedatafn(f2 or f1)

        path1 = pathfn(f1 or f2)
        path2 = pathfn(f2 or f1)
        header = []
        if opts.git:
            header.append(
                b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
            )
            if not f1:  # added
                header.append(b'new file mode %s' % gitmode[flag2])
            elif not f2:  # removed
                header.append(b'deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append(b'old mode %s' % mode1)
                    header.append(b'new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append(b'similarity index %d%%' % sim)
                    header.append(b'%s from %s' % (copyop, path1))
                    header.append(b'%s to %s' % (copyop, path2))
        elif revs:
            header.append(diffline(path1, revs))

        # fctx.is | diffopts                | what to   | is fctx.data()
        # binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes      | no   no       no  *     | summary   | no
        # yes      | no   no       yes *     | base85    | yes
        # yes      | no   yes      no  *     | summary   | no
        # yes      | no   yes      yes 0     | summary   | no
        # yes      | no   yes      yes >0    | summary   | semi [1]
        # yes      | yes  *        *   *     | text diff | yes
        # no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (
            not opts.git or (opts.git and opts.nobinary and not opts.index)
        ):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0'  # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # git binary patch (base85); no index line for identical content
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append(
                    b'index %s..%s' % (gitindex(content1), gitindex(content2))
                )
            hunks = ((None, [text]),)
        else:
            if opts.git and opts.index > 0:
                # abbreviated index line (opts.index hex digits of each hash)
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append(
                    b'index %s..%s %s'
                    % (
                        gitindex(content1)[0 : opts.index],
                        gitindex(content2)[0 : opts.index],
                        gitmode[flag],
                    )
                )

            uheaders, hunks = mdiff.unidiff(
                content1,
                date1,
                content2,
                date2,
                path1,
                path2,
                binary=binary,
                opts=opts,
            )
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
3093
3093
3094
3094
def diffstatsum(stats):
    '''aggregate (filename, adds, removes, isbinary) tuples

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename (in display columns), the largest per-file change count, the
    grand totals of additions and removals, and whether any entry was
    binary.
    '''
    widest_name = 0
    widest_change = 0
    total_added = 0
    total_removed = 0
    any_binary = False

    for name, nadds, nremoves, isbinary in stats:
        widest_name = max(widest_name, encoding.colwidth(name))
        widest_change = max(widest_change, nadds + nremoves)
        total_added += nadds
        total_removed += nremoves
        any_binary = any_binary or isbinary

    return widest_name, widest_change, total_added, total_removed, any_binary
3105
3105
3106
3106
def diffstatdata(lines):
    '''parse diff output lines into [(filename, adds, removes, isbinary)]

    One tuple is produced per file section ("diff ..." header); lines
    inside a file header (before the first "@@" hunk marker) are never
    counted as additions or removals.
    '''
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname, plus_count, minus_count, binary_seen = None, 0, 0, False

    def flush():
        # record stats for the file currently being scanned, if any
        if fname:
            results.append((fname, plus_count, minus_count, binary_seen))

    # inheader tracks whether we are still inside a per-file header, so
    # '---'/'+++' lines are not mistaken for removals/additions
    inheader = False

    for line in lines:
        if line.startswith(b'diff'):
            flush()
            # a new file diff starts: reset counters and header state
            inheader = True
            plus_count, minus_count, binary_seen = 0, 0, False
            if line.startswith(b'diff --git a/'):
                fname = gitre.search(line).group(2)
            elif line.startswith(b'diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(line).group(1)
        elif line.startswith(b'@@'):
            inheader = False
        elif line.startswith(b'+') and not inheader:
            plus_count += 1
        elif line.startswith(b'-') and not inheader:
            minus_count += 1
        elif line.startswith((b'GIT binary patch', b'Binary file')):
            binary_seen = True
        elif line.startswith(b'rename from'):
            fname = line[12:]
        elif line.startswith(b'rename to'):
            fname += b' => %s' % line[10:]
    flush()
    return results
3150
3150
3151
3151
def diffstat(lines, width=80):
    '''render diff output as a per-file histogram, returned as bytes

    Each changed file gets a row of the form " name | count ++--"; a
    totals line is appended when at least one file changed. The graph is
    scaled so the whole row fits in `width` columns.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the numeric column; binary rows print b'Bin' (3 columns)
    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    # room left for the +/- graph, but never less than 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    rows = []
    for filename, adds, removes, isbinary in stats:
        count = b'Bin' if isbinary else b'%d' % (adds + removes)
        rows.append(
            b' %s%s | %*s %s%s\n'
            % (
                filename,
                b' ' * (maxname - encoding.colwidth(filename)),
                countwidth,
                count,
                b'+' * scale(adds),
                b'-' * scale(removes),
            )
        )

    if stats:
        rows.append(
            _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
            % (len(stats), totaladds, totalremoves)
        )

    return b''.join(rows)
3198
3198
3199
3199
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        # rows ending in '+' or '-' carry a graph; everything else
        # (totals line, binary rows) is emitted unlabelled
        if not (row and row[-1] in b'+-'):
            yield (row, b'')
            continue
        name, graph = row.rsplit(b' ', 1)
        yield (name + b' ', b'')
        for pattern, label in (
            (br'\++', b'diffstat.inserted'),
            (br'-+', b'diffstat.deleted'),
        ):
            m = re.search(pattern, graph)
            if m:
                yield (m.group(0), label)
    yield (b'\n', b'')
General Comments 0
You need to be logged in to leave comments. Login now