##// END OF EJS Templates
patch: refactor the content-diffing part into a separate function so extensions can wrap it
Pulkit Goyal -
r45658:20a65e39 default
parent child Browse files
Show More
@@ -1,3231 +1,3260 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import errno
14 import errno
15 import os
15 import os
16 import re
16 import re
17 import shutil
17 import shutil
18 import zlib
18 import zlib
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import (
21 from .node import (
22 hex,
22 hex,
23 short,
23 short,
24 )
24 )
25 from .pycompat import open
25 from .pycompat import open
26 from . import (
26 from . import (
27 copies,
27 copies,
28 diffhelper,
28 diffhelper,
29 diffutil,
29 diffutil,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 pycompat,
35 pycompat,
36 scmutil,
36 scmutil,
37 similar,
37 similar,
38 util,
38 util,
39 vfs as vfsmod,
39 vfs as vfsmod,
40 )
40 )
41 from .utils import (
41 from .utils import (
42 dateutil,
42 dateutil,
43 hashutil,
43 hashutil,
44 procutil,
44 procutil,
45 stringutil,
45 stringutil,
46 )
46 )
47
47
# In-memory bytes file (util abstracts the py2/py3 difference).
stringio = util.stringio

# Matches the "diff --git a/<old> b/<new>" header of a git-style diff.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a line into runs of tabs and runs of non-tab bytes.
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# Splits a line into whitespace runs, "word" bytes (alnum, underscore and
# any 8-bit byte) and single punctuation bytes — presumably for word-level
# diff highlighting; confirm against callers.
wordsplitter = re.compile(
    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
)

# Re-exported so existing callers importing PatchError from here keep working.
PatchError = error.PatchError
57
57
58 # public functions
58 # public functions
59
59
60
60
def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        # Heuristic: does this line look like an RFC-2822 style header
        # ("Key: value", or an indented continuation while in a header)?
        if inheader and line.startswith((b' ', b'\t')):
            # continuation
            return True
        if line.startswith((b' ', b'-', b'+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(b': ', 1)
        return len(l) == 2 and b' ' not in l[0]

    def chunk(lines):
        # Wrap the accumulated lines in a file-like object.
        return stringio(b''.join(lines))

    def hgsplit(stream, cur):
        # Split "hg export"-style output on '# HG changeset patch' markers
        # that appear after a blank line (i.e. outside a patch header).
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith(b'# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox on its 'From ' separator lines, recursing into each
        # message (which may itself be mime, hg export output, etc.).
        # cur[1:] drops the 'From ' separator line itself.
        for line in stream:
            if line.startswith(b'From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Let the email parser handle a MIME message; yield each text part.
        def msgfp(m):
            # Serialize a (sub)message back into a file-like object.
            fp = stringio()
            g = mail.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split by hand on runs of header-looking lines.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the whole input is one plain patch.
        yield chunk(cur)

    class fiter(object):
        # Adapt an object that has readline() but no iterator protocol.
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next  # py3 spelling of the same method

    inheader = False
    cur = []

    mimeheaders = [b'content-type']

    if not util.safehasattr(stream, b'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Sniff the stream until we can tell which splitter applies.
    for line in stream:
        cur.append(line)
        if line.startswith(b'# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith(b'From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(b':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith(b'--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
190
190
191
191
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
# Each '# <header> <value>' line in an hg patch header is matched against
# this table and stored under the corresponding key in the data dict
# returned by extract()/_extract().
patchheadermap = [
    (b'Date', b'date'),
    (b'Branch', b'branch'),
    (b'Node ID', b'nodeid'),
]
199
199
200
200
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # The extracted diff body is written to a temp file whose name is
    # returned as data[b'filename']; the context manager guarantees the
    # file is removed once the caller is done with it.
    fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
    tmpfp = os.fdopen(fd, 'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        tmpfp.close()
        os.unlink(tmpname)
226
226
227
227
def _extract(ui, fileobj, tmpname, tmpfp):
    """Parse fileobj (a patch or an email carrying one) into a data dict.

    Writes the raw diff payload(s) to tmpfp and, if any diff was found,
    records tmpname under data[b'filename'].  See extract() for the keys.
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
        br'---[ \t].*?^\+\+\+[ \t]|'
        br'\*\*\*[ \t].*?^---[ \t])',
        re.MULTILINE | re.DOTALL,
    )

    data = {}

    msg = mail.parse(fileobj)

    subject = msg['Subject'] and mail.headdecode(msg['Subject'])
    data[b'user'] = msg['From'] and mail.headdecode(msg['From'])
    if not subject and not data[b'user']:
        # Not an email, restore parsed headers if any
        subject = (
            b'\n'.join(
                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
            )
            + b'\n'
        )

    # should try to parse msg['Date']
    parents = []

    nodeid = msg['X-Mercurial-Node']
    if nodeid:
        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
        ui.debug(b'Node ID: %s\n' % nodeid)

    if subject:
        if subject.startswith(b'[PATCH'):
            # Strip a leading "[PATCH n/m]"-style tag from the subject.
            pend = subject.find(b']')
            if pend >= 0:
                subject = subject[pend + 1 :].lstrip()
        # Unfold multi-line (continuation) subjects into a single line.
        subject = re.sub(br'\n[ \t]+', b' ', subject)
        ui.debug(b'Subject: %s\n' % subject)
    if data[b'user']:
        ui.debug(b'From: %s\n' % data[b'user'])
    diffs_seen = 0
    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
    message = b''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug(b'Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # hgpatch: saw '# HG changeset patch'
            # hgpatchheader: still inside the '# ...' header block
            # ignoretext: saw the '---' commit-message terminator
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug(b'found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            # Everything before the diff start is (candidate) commit message.
            for line in payload[: m.start(0)].splitlines():
                if line.startswith(b'# HG changeset patch') and not hgpatch:
                    ui.debug(b'patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith(b'# User '):
                        data[b'user'] = line[7:]
                        ui.debug(b'From: %s\n' % data[b'user'])
                    elif line.startswith(b"# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith(b"# "):
                        for header, key in patchheadermap:
                            prefix = b'# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix) :]
                                ui.debug(b'%s: %s\n' % (header, data[key]))
                    else:
                        # First non-'#' line ends the hg patch header.
                        hgpatchheader = False
                elif line == b'---':
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write(b'\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith(b'\n'):
                    tmpfp.write(b'\n')
        elif not diffs_seen and message and content_type == b'text/plain':
            # Plain-text part before any diff: append to the message.
            message += b'\n' + payload

    if subject and not message.startswith(subject):
        message = b'%s\n%s' % (subject, message)
    data[b'message'] = message
    tmpfp.close()
    if parents:
        data[b'p1'] = parents.pop(0)
        if parents:
            data[b'p2'] = parents.pop(0)

    if diffs_seen:
        data[b'filename'] = tmpname

    return data
339
339
340
340
class patchmeta(object):
    """Metadata describing one file touched by a git-style patch.

    'op' is one of ADD, DELETE, RENAME, COPY or MODIFY (the default).
    'path' is the patched file path; 'oldpath' is the source file when
    'op' is COPY or RENAME, and None otherwise.  When the file mode
    changed, 'mode' is an (islink, isexec) pair, else it is None.
    'binary' is set when a "GIT binary patch" section was seen.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = b'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep the raw masked permission bits; only truthiness matters.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        if afile != b'/dev/null':
            return afile == b'a/' + (self.oldpath or self.path)
        # Additions diff against /dev/null on the "a" side.
        return self.op == b'ADD'

    def _ispatchingb(self, bfile):
        if bfile != b'/dev/null':
            return bfile == b'b/' + self.path
        # Deletions diff against /dev/null on the "b" side.
        return self.op == b'DELETE'

    def ispatching(self, afile, bfile):
        # True when this metadata matches the given diff header pair.
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
387
387
388
388
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per 'diff --git' section
    read from the linereader lr.
    """

    # Filter patch for git information
    gp = None  # patchmeta for the section currently being parsed
    gitpatches = []
    for line in lr:
        line = line.rstrip(b' \r\n')
        if line.startswith(b'diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    # New section starts: flush the previous one.
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith(b'--- '):
                # Hunk header reached: metadata for this file is complete.
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith(b'rename from '):
                gp.op = b'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith(b'rename to '):
                gp.path = line[10:]
            elif line.startswith(b'copy from '):
                gp.op = b'COPY'
                gp.oldpath = line[10:]
            elif line.startswith(b'copy to '):
                gp.path = line[8:]
            elif line.startswith(b'deleted file'):
                gp.op = b'DELETE'
            elif line.startswith(b'new file mode '):
                gp.op = b'ADD'
                # Last six characters hold the octal permission bits.
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
432
432
433
433
class linereader(object):
    """File-object wrapper that lets lines be pushed back onto the input."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []  # pushed-back lines, served before reading fp again

    def push(self, line):
        # None is a convenience no-op so callers need not check first.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # Yield lines until an empty read signals EOF.
        return iter(self.readline, b'')
453
453
454
454
class abstractbackend(object):
    """Abstract interface for the targets a patch can be applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        # Intentionally a no-op by default; backends may override.

    def exists(self, fname):
        # Return True if fname exists in the target.
        raise NotImplementedError

    def close(self):
        # Finalize the backend after all files have been patched.
        raise NotImplementedError
488
488
489
489
class fsbackend(abstractbackend):
    """Backend applying patches directly to the filesystem under basedir,
    via a vfs."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For symlinks the "data" is the link target.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # A missing file just means not-executable here; the read
            # below reports the missing file as (None, None).
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Only the flags changed; leave the content untouched.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        # Save rejected hunks next to the target file as <fname>.rej.
        fname = fname + b".rej"
        self.ui.warn(
            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, fname)
        )
        fp = self.opener(fname, b'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        # lexists: a dangling symlink still counts as existing.
        return self.opener.lexists(fname)
540
540
541
541
class workingbackend(fsbackend):
    """fsbackend that also records changes in the repo's dirstate.

    Tracks which files were changed, removed and copied so that close()
    can register copies and adds/removes (with similarity-based rename
    detection) once patching is done.
    """

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity  # forwarded to scmutil.marktouched
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to patch a file that exists on disk but is unknown ('?')
        # to the dirstate.
        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
585
585
586
586
class filestore(object):
    """Store for patched file contents, memory-bounded by *maxsize*.

    Small results are kept in an in-memory dict; once the memory budget
    is exhausted, further files spill into a temporary directory that is
    removed by close().
    """

    def __init__(self, maxsize=None):
        self.opener = None  # lazily-created vfs over the spill directory
        self.files = {}  # fname -> (on-disk name, mode, copied)
        self.created = 0  # counter providing simple on-disk names
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0  # bytes currently held in memory
        self.data = {}  # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if not fits:
            if self.opener is None:
                self.opener = vfsmod.vfs(pycompat.mkdtemp(prefix=b'hg-patch-'))
            # Plain numeric names avoid any filename encoding issues.
            ondisk = b'%d' % self.created
            self.opener.write(ondisk, data)
            self.created += 1
            self.files[fname] = (ondisk, mode, copied)
        else:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            ondisk, mode, copied = self.files[fname]
            return self.opener.read(ondisk), mode, copied
        return None, None, None

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
623
623
624
624
class repobackend(abstractbackend):
    """Backend that records patched results into a store keyed off an
    existing changectx, rather than touching the working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Only files present in the base context may be patched.
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for ``fname`` in the base
        context, or (None, None) when the file is absent there."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = b'l' in flags
        isexec = b'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Store the patched content of ``fname`` and track it as changed."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: reuse the current content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched (changed or removed)."""
        return self.changed | self.removed
666
666
667
667
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Groups are (oldstart, oldlen, newstart, newlen); a length group is
# absent (None) when the range length is 1.
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# Range line of a context-diff hunk ("--- start,len ----" or
# "*** start,len ****"); groups are (start, len).
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# End-of-line normalization modes accepted by the patching code.
eolmodes = [b'strict', b'crlf', b'lf', b'auto']
672
672
673
673
class patchfile(object):
    """State for applying a sequence of hunks to a single file.

    Loads the current content from ``backend`` (or from ``store`` when the
    file is a copy/rename source), applies hunks with optional end-of-line
    normalization and fuzzing, and writes the result (plus any rejected
    hunks) back through the backend.
    """

    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
        # ``gp`` describes the operation on one file (path, op, mode, ...).
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the first original line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
        self.remove = gp.op == b'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith(b'\r\n'):
                    self.eol = b'\r\n'
                elif self.lines[0].endswith(b'\n'):
                    self.eol = b'\n'
                if eolmode != b'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith(b'\r\n'):
                            l = l[:-2] + b'\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    b"(use '--prefix' to apply patch relative to the "
                    b"current directory)\n"
                )
            )

        self.hash = {}  # line content -> line numbers, built lazily in apply()
        self.dirty = 0
        self.offset = 0  # cumulative line delta from hunks applied so far
        self.skew = 0  # extra displacement found while fuzzing earlier hunks
        self.rej = []  # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write ``lines`` to ``fname`` via the backend, converting line
        endings first according to ``self.eolmode``."""
        if self.eolmode == b'auto':
            eol = self.eol
        elif self.eolmode == b'crlf':
            eol = b'\r\n'
        else:
            eol = b'\n'

        if self.eolmode != b'strict' and eol and eol != b'\n':
            rawlines = []
            for l in lines:
                if l and l.endswith(b'\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Emit "patching file X" at most once per file: eagerly in verbose
        # mode, otherwise only when a warning forces it out.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _(b"patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = [b"--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != b'\n':
                    lines.append(b"\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk ``h`` to the in-memory lines.

        Returns 0 on a clean apply, the fuzz amount when the hunk only
        applied with fuzz, and -1 on failure (the hunk is then queued in
        ``self.rej``). Raises PatchError for a malformed hunk.
        """
        if not h.complete():
            raise PatchError(
                _(b"bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _(b"cannot create %s: destination already exists\n")
                    % self.fname
                )
            else:
                self.ui.warn(_(b"file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # Binary hunks replace (or delete) the whole content at once.
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (
            self.eolmode in (b'crlf', b'lf')
            or self.eolmode == b'auto'
            and self.eol
        ):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz; for each fuzz level, first trim context at
        # the top only, then at both ends.
        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
905
905
906
906
class header(object):
    """The header portion of a single file's diff, plus its hunks."""

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when the header marks a binary (git index) diff."""
        for line in self.header:
            if line.startswith(b'index '):
                return True
        return False

    def pretty(self, fp):
        """Write a human-oriented rendering of the header to ``fp``."""
        for line in self.header:
            if line.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                break
            if line.startswith(b'---'):
                nchanged = sum(max(h.added, h.removed) for h in self.hunks)
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (len(self.hunks), nchanged)
                )
                break
            fp.write(line)

    def write(self, fp):
        fp.write(b''.join(self.header))

    def allhunks(self):
        """True when hunks cannot be applied individually."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the affected file names: one entry, or [from, to]."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (
            ' '.join(pycompat.rapply(pycompat.fsdecode, self.files()))
        )

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
986
986
987
987
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        def trimcontext(lines, reverse=False):
            # Cap context at ``maxcontext`` lines, trimming from the front
            # when ``reverse`` is set; returns (number trimmed, lines).
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        nbefore, self.before = trimcontext(before, True)
        # Trimming leading context shifts both start lines forward.
        self.fromline = fromline + nbefore
        self.toline = toline + nbefore
        _nafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        key = (
            tuple(self.hunk),
            tuple(self.header.files()),
            self.fromline,
            self.proc,
        )
        return hash(key)

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith(b'+'))
        rem = sum(1 for line in hunk if line.startswith(b'-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        flipped = [b'%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            flipped,
            self.after,
        )

    def write(self, fp):
        """Emit this hunk to ``fp`` in unified diff format."""
        ncontext = len(self.before) + len(self.after)
        # The no-newline marker is bookkeeping, not a real context line.
        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
            ncontext -= 1
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                ncontext + self.removed,
                self.toline,
                ncontext + self.added,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    @encoding.strmethod
    def __repr__(self):
        return b'<hunk %r@%d>' % (self.filename(), self.fromline)
1096
1096
1097
1097
def getmessages():
    """Return the prompt and help strings used by interactive hunk
    selection, keyed by message group then by operation name."""
    multiple = {
        b'apply': _(b"apply change %d/%d to '%s'?"),
        b'discard': _(b"discard change %d/%d to '%s'?"),
        b'keep': _(b"keep change %d/%d to '%s'?"),
        b'record': _(b"record change %d/%d to '%s'?"),
    }
    single = {
        b'apply': _(b"apply this change to '%s'?"),
        b'discard': _(b"discard this change to '%s'?"),
        b'keep': _(b"keep this change to '%s'?"),
        b'record': _(b"record this change to '%s'?"),
    }
    helptable = {
        b'apply': _(
            b'[Ynesfdaq?]'
            b'$$ &Yes, apply this change'
            b'$$ &No, skip this change'
            b'$$ &Edit this change manually'
            b'$$ &Skip remaining changes to this file'
            b'$$ Apply remaining changes to this &file'
            b'$$ &Done, skip remaining changes and files'
            b'$$ Apply &all changes to all remaining files'
            b'$$ &Quit, applying no changes'
            b'$$ &? (display help)'
        ),
        b'discard': _(
            b'[Ynesfdaq?]'
            b'$$ &Yes, discard this change'
            b'$$ &No, skip this change'
            b'$$ &Edit this change manually'
            b'$$ &Skip remaining changes to this file'
            b'$$ Discard remaining changes to this &file'
            b'$$ &Done, skip remaining changes and files'
            b'$$ Discard &all changes to all remaining files'
            b'$$ &Quit, discarding no changes'
            b'$$ &? (display help)'
        ),
        b'keep': _(
            b'[Ynesfdaq?]'
            b'$$ &Yes, keep this change'
            b'$$ &No, skip this change'
            b'$$ &Edit this change manually'
            b'$$ &Skip remaining changes to this file'
            b'$$ Keep remaining changes to this &file'
            b'$$ &Done, skip remaining changes and files'
            b'$$ Keep &all changes to all remaining files'
            b'$$ &Quit, keeping all changes'
            b'$$ &? (display help)'
        ),
        b'record': _(
            b'[Ynesfdaq?]'
            b'$$ &Yes, record this change'
            b'$$ &No, skip this change'
            b'$$ &Edit this change manually'
            b'$$ &Skip remaining changes to this file'
            b'$$ Record remaining changes to this &file'
            b'$$ &Done, skip remaining changes and files'
            b'$$ Record &all changes to all remaining files'
            b'$$ &Quit, recording no changes'
            b'$$ &? (display help)'
        ),
    }
    return {
        b'multiple': multiple,
        b'single': single,
        b'help': helptable,
    }
1163
1163
1164
1164
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    ``headers`` is a list of parsed patch headers, each carrying its
    hunks.  ``match`` is a matcher; when every file of a header is an
    exact match (named explicitly), the header is examined without
    prompting.  ``operation`` selects the prompt wording (e.g.
    b'record') and defaults to b'record' when None.

    Returns a 2-tuple ``(chunks, opts)`` where ``chunks`` is a flat
    list of the selected headers and hunks and ``opts`` is an empty
    dict.
    """
    messages = getmessages()

    if operation is None:
        operation = b'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return a 4-tuple (ret, skipfile, skipall, newpatches), where
        ``ret`` is True/False (apply this chunk or not) and ``newpatches``
        is only set when the user edited the chunk.
        """
        newpatches = None
        # earlier whole-file / whole-session decisions short-circuit
        # the prompt entirely
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages[b'help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
            ui.write(b"\n")
            if r == 8:  # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0:  # yes
                ret = True
            elif r == 1:  # no
                ret = False
            elif r == 2:  # Edit patch
                if chunk is None:
                    ui.write(_(b'cannot edit patch for whole file'))
                    ui.write(b"\n")
                    continue
                if chunk.header.binary():
                    ui.write(_(b'cannot edit patch for binary file'))
                    ui.write(b"\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = b'---' + _(
                    """
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = pycompat.mkstemp(
                    prefix=b"hg-editor-", suffix=b".diff"
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, 'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    # append the help text, commented out so it can be
                    # stripped back off below
                    f.write(
                        b''.join(
                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
                        )
                    )
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        b"%s \"%s\"" % (editor, patchfn),
                        environ={b'HGUSER': ui.username()},
                        blockedtag=b'filterpatch',
                    )
                    if ret != 0:
                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, 'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith(b'#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3:  # Skip
                ret = skipfile = False
            elif r == 4:  # file (Record remaining)
                ret = skipfile = True
            elif r == 5:  # done, skip remaining
                ret = skipall = False
            elif r == 6:  # all
                ret = skipall = True
            elif r == 7:  # quit
                raise error.Abort(_(b'user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        # per-file decision resets for every header; fixoffset tracks the
        # line drift introduced by skipped/edited hunks of this file
        skipfile = None
        fixoffset = 0
        hdr = b''.join(h.header)
        if hdr in seen:
            # never prompt twice for an identical header
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = _(b'examine changes to %s?') % _(b' and ').join(
            b"'%s'" % f for f in files
        )
        # auto-accept when every file was matched exactly
        if all(match.exact(f) for f in files):
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages[b'single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages[b'multiple'][operation] % (
                    idx,
                    total,
                    chunk.filename(),
                )
            r, skipfile, skipall, newpatches = prompt(
                skipfile, skipall, msg, chunk
            )
            if r:
                if fixoffset:
                    # copy before adjusting so the original chunk object
                    # keeps its own target line
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: record the edited hunks in
                # place of the original one
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # hunk skipped: later hunks of this file shift by the
                # difference between removed and added lines
                fixoffset += chunk.removed - chunk.added
    return (
        # flatten, dropping entries where only the bare header remained
        # (unless the header is special, e.g. not a plain text change)
        sum(
            [
                h
                for h in pycompat.itervalues(applied)
                if h[0].special() or len(h) > 1
            ],
            [],
        ),
        {},
    )
1345
1345
1346
1346
class hunk(object):
    """One hunk of a text patch, in unified or context format.

    ``self.hunk`` holds the raw hunk lines starting with the
    description line; ``self.a`` holds old-side lines and ``self.b``
    new-side lines, as filled in by the format-specific readers.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk description line ('@@ ...' or context header)
        # num: hunk ordinal, used in error messages
        # lr: linereader the hunk body is consumed from; None builds an
        #     empty shell that the caller fills in (see getnormalized())
        # context: truthy when the hunk is in context-diff format
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # turn trailing CRLF into bare LF, leave other lines alone
            nlines = []
            for line in lines:
                if line.endswith(b'\r\n'):
                    line = line[:-2] + b'\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader ``lr``.

        Fills starta/lena/startb/lenb from the '@@' description and the
        a/b/hunk line lists via diffhelper.addlines.  Raises PatchError
        on a malformed hunk.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # omitted lengths (e.g. '@@ -1 +1 @@') default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(
                lr, self.hunk, self.lena, self.lenb, self.a, self.b
            )
        except error.ParseError as e:
            raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk from ``lr``.

        Reads the old-side block, then the new-side block, merging the
        two into a unified-style line list, and finally rewrites
        self.desc as a unified '@@' description.  Raises PatchError on
        malformed input.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- '/'! ' become '-' lines, '  ' stays context
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith(b'---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'- ') or l.startswith(b'! '):
                u = b'-' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # no-eol marker: strip the newline from the last old line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ '/'! ' become '+' lines; interleave them into
        # self.hunk at the right position relative to the old side
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'+ ') or l.startswith(b'! '):
                u = b'+' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): this is the new-side loop but the message
                # says "old text line" — looks like a copy/paste of the
                # message above; fixing it would touch translated strings.
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.b.append(s)
            # advance through self.hunk until this new-side line either
            # matches an existing context line or gets inserted
            while True:
                if hunki >= len(self.hunk):
                    h = b""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith(b'-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith(b'-') or x.startswith(b' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith(b'+') or x.startswith(b' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing no-eol marker line if present, otherwise
        # push the lookahead line back
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold as many lines as the ranges announce."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(b' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(b' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz``
        leading/trailing context lines trimmed and the start positions
        adjusted accordingly (top-only trimming when ``toponly``)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1573
1573
1574
1574
class binhunk(object):
    """A binary patch file."""

    def __init__(self, lr, fname):
        # Decoded payload; stays None until _read() succeeds.
        self.text = None
        # True when the payload is a delta against the previous content
        # instead of a full literal.
        self.delta = False
        self.hunk = [b'GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """Tell whether the binary payload was fully decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list."""
        if not self.delta:
            return [self.text]
        return [applybindelta(self.text, b''.join(lines))]

    def _read(self, lr):
        """Decode the base85/zlib payload from linereader ``lr``.

        Raises PatchError when the header is missing, the base85 data is
        invalid, or the decompressed size does not match the announced
        one.
        """

        def nextline(reader, hunk):
            # record the raw line in the hunk, return it without EOL
            raw = reader.readline()
            hunk.append(raw)
            return raw.rstrip(b'\r\n')

        # Scan forward for the 'literal <size>' / 'delta <size>' header.
        size = None
        while size is None:
            header = nextline(lr, self.hunk)
            if not header:
                raise PatchError(
                    _(b'could not extract "%s" binary data') % self._fname
                )
            if header.startswith(b'literal '):
                size = int(header[8:].rstrip())
            elif header.startswith(b'delta '):
                size = int(header[6:].rstrip())
                self.delta = True

        # Every payload line starts with a length character (A-Z encode
        # 1..26 bytes, a-z encode 27..52); the rest is base85 data.
        chunks = []
        line = nextline(lr, self.hunk)
        while len(line) > 1:
            lenchar = line[0:1]
            if b'A' <= lenchar <= b'Z':
                nbytes = ord(lenchar) - ord(b'A') + 1
            else:
                nbytes = ord(lenchar) - ord(b'a') + 27
            try:
                chunks.append(util.b85decode(line[1:])[:nbytes])
            except ValueError as e:
                raise PatchError(
                    _(b'could not decode "%s" binary patch: %s')
                    % (self._fname, stringutil.forcebytestr(e))
                )
            line = nextline(lr, self.hunk)

        text = zlib.decompress(b''.join(chunks))
        if len(text) != size:
            raise PatchError(
                _(b'"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text
1635
1635
1636
1636
def parsefilename(str):
    """Extract the path from a '--- <path>' / '+++ <path>' diff line.

    Anything after the first tab — or, when no tab is present, the
    first space — is discarded (diff tools append timestamps there).
    """
    # NOTE: the parameter name shadows the builtin ``str``; kept as-is
    # so the external signature stays unchanged.
    path = str[4:].rstrip(b'\r\n')
    for separator in (b'\t', b' '):
        cut = path.find(separator)
        if cut >= 0:
            return path[:cut]
    return path
1646
1646
1647
1647
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''
    # Entries without a reversehunk() method (e.g. headers) pass through
    # untouched; every reversible hunk is replaced by its flipped form.
    return [
        c.reversehunk() if util.safehasattr(c, b'reversehunk') else c
        for c in hunks
    ]
1710
1710
1711
1711
1712 def parsepatch(originalchunks, maxcontext=None):
1712 def parsepatch(originalchunks, maxcontext=None):
1713 """patch -> [] of headers -> [] of hunks
1713 """patch -> [] of headers -> [] of hunks
1714
1714
1715 If maxcontext is not None, trim context lines if necessary.
1715 If maxcontext is not None, trim context lines if necessary.
1716
1716
1717 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1717 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1718 ... --- a/folder1/g
1718 ... --- a/folder1/g
1719 ... +++ b/folder1/g
1719 ... +++ b/folder1/g
1720 ... @@ -1,8 +1,10 @@
1720 ... @@ -1,8 +1,10 @@
1721 ... 1
1721 ... 1
1722 ... 2
1722 ... 2
1723 ... -3
1723 ... -3
1724 ... 4
1724 ... 4
1725 ... 5
1725 ... 5
1726 ... 6
1726 ... 6
1727 ... +6.1
1727 ... +6.1
1728 ... +6.2
1728 ... +6.2
1729 ... 7
1729 ... 7
1730 ... 8
1730 ... 8
1731 ... +9'''
1731 ... +9'''
1732 >>> out = util.stringio()
1732 >>> out = util.stringio()
1733 >>> headers = parsepatch([rawpatch], maxcontext=1)
1733 >>> headers = parsepatch([rawpatch], maxcontext=1)
1734 >>> for header in headers:
1734 >>> for header in headers:
1735 ... header.write(out)
1735 ... header.write(out)
1736 ... for hunk in header.hunks:
1736 ... for hunk in header.hunks:
1737 ... hunk.write(out)
1737 ... hunk.write(out)
1738 >>> print(pycompat.sysstr(out.getvalue()))
1738 >>> print(pycompat.sysstr(out.getvalue()))
1739 diff --git a/folder1/g b/folder1/g
1739 diff --git a/folder1/g b/folder1/g
1740 --- a/folder1/g
1740 --- a/folder1/g
1741 +++ b/folder1/g
1741 +++ b/folder1/g
1742 @@ -2,3 +2,2 @@
1742 @@ -2,3 +2,2 @@
1743 2
1743 2
1744 -3
1744 -3
1745 4
1745 4
1746 @@ -6,2 +5,4 @@
1746 @@ -6,2 +5,4 @@
1747 6
1747 6
1748 +6.1
1748 +6.1
1749 +6.2
1749 +6.2
1750 7
1750 7
1751 @@ -8,1 +9,2 @@
1751 @@ -8,1 +9,2 @@
1752 8
1752 8
1753 +9
1753 +9
1754 """
1754 """
1755
1755
1756 class parser(object):
1756 class parser(object):
1757 """patch parsing state machine"""
1757 """patch parsing state machine"""
1758
1758
1759 def __init__(self):
1759 def __init__(self):
1760 self.fromline = 0
1760 self.fromline = 0
1761 self.toline = 0
1761 self.toline = 0
1762 self.proc = b''
1762 self.proc = b''
1763 self.header = None
1763 self.header = None
1764 self.context = []
1764 self.context = []
1765 self.before = []
1765 self.before = []
1766 self.hunk = []
1766 self.hunk = []
1767 self.headers = []
1767 self.headers = []
1768
1768
1769 def addrange(self, limits):
1769 def addrange(self, limits):
1770 self.addcontext([])
1770 self.addcontext([])
1771 fromstart, fromend, tostart, toend, proc = limits
1771 fromstart, fromend, tostart, toend, proc = limits
1772 self.fromline = int(fromstart)
1772 self.fromline = int(fromstart)
1773 self.toline = int(tostart)
1773 self.toline = int(tostart)
1774 self.proc = proc
1774 self.proc = proc
1775
1775
1776 def addcontext(self, context):
1776 def addcontext(self, context):
1777 if self.hunk:
1777 if self.hunk:
1778 h = recordhunk(
1778 h = recordhunk(
1779 self.header,
1779 self.header,
1780 self.fromline,
1780 self.fromline,
1781 self.toline,
1781 self.toline,
1782 self.proc,
1782 self.proc,
1783 self.before,
1783 self.before,
1784 self.hunk,
1784 self.hunk,
1785 context,
1785 context,
1786 maxcontext,
1786 maxcontext,
1787 )
1787 )
1788 self.header.hunks.append(h)
1788 self.header.hunks.append(h)
1789 self.fromline += len(self.before) + h.removed
1789 self.fromline += len(self.before) + h.removed
1790 self.toline += len(self.before) + h.added
1790 self.toline += len(self.before) + h.added
1791 self.before = []
1791 self.before = []
1792 self.hunk = []
1792 self.hunk = []
1793 self.context = context
1793 self.context = context
1794
1794
1795 def addhunk(self, hunk):
1795 def addhunk(self, hunk):
1796 if self.context:
1796 if self.context:
1797 self.before = self.context
1797 self.before = self.context
1798 self.context = []
1798 self.context = []
1799 if self.hunk:
1799 if self.hunk:
1800 self.addcontext([])
1800 self.addcontext([])
1801 self.hunk = hunk
1801 self.hunk = hunk
1802
1802
1803 def newfile(self, hdr):
1803 def newfile(self, hdr):
1804 self.addcontext([])
1804 self.addcontext([])
1805 h = header(hdr)
1805 h = header(hdr)
1806 self.headers.append(h)
1806 self.headers.append(h)
1807 self.header = h
1807 self.header = h
1808
1808
1809 def addother(self, line):
1809 def addother(self, line):
1810 pass # 'other' lines are ignored
1810 pass # 'other' lines are ignored
1811
1811
1812 def finished(self):
1812 def finished(self):
1813 self.addcontext([])
1813 self.addcontext([])
1814 return self.headers
1814 return self.headers
1815
1815
1816 transitions = {
1816 transitions = {
1817 b'file': {
1817 b'file': {
1818 b'context': addcontext,
1818 b'context': addcontext,
1819 b'file': newfile,
1819 b'file': newfile,
1820 b'hunk': addhunk,
1820 b'hunk': addhunk,
1821 b'range': addrange,
1821 b'range': addrange,
1822 },
1822 },
1823 b'context': {
1823 b'context': {
1824 b'file': newfile,
1824 b'file': newfile,
1825 b'hunk': addhunk,
1825 b'hunk': addhunk,
1826 b'range': addrange,
1826 b'range': addrange,
1827 b'other': addother,
1827 b'other': addother,
1828 },
1828 },
1829 b'hunk': {
1829 b'hunk': {
1830 b'context': addcontext,
1830 b'context': addcontext,
1831 b'file': newfile,
1831 b'file': newfile,
1832 b'range': addrange,
1832 b'range': addrange,
1833 },
1833 },
1834 b'range': {b'context': addcontext, b'hunk': addhunk},
1834 b'range': {b'context': addcontext, b'hunk': addhunk},
1835 b'other': {b'other': addother},
1835 b'other': {b'other': addother},
1836 }
1836 }
1837
1837
1838 p = parser()
1838 p = parser()
1839 fp = stringio()
1839 fp = stringio()
1840 fp.write(b''.join(originalchunks))
1840 fp.write(b''.join(originalchunks))
1841 fp.seek(0)
1841 fp.seek(0)
1842
1842
1843 state = b'context'
1843 state = b'context'
1844 for newstate, data in scanpatch(fp):
1844 for newstate, data in scanpatch(fp):
1845 try:
1845 try:
1846 p.transitions[state][newstate](p, data)
1846 p.transitions[state][newstate](p, data)
1847 except KeyError:
1847 except KeyError:
1848 raise PatchError(
1848 raise PatchError(
1849 b'unhandled transition: %s -> %s' % (state, newstate)
1849 b'unhandled transition: %s -> %s' % (state, newstate)
1850 )
1850 )
1851 state = newstate
1851 state = newstate
1852 del fp
1852 del fp
1853 return p.finished()
1853 return p.finished()
1854
1854
1855
1855
1856 def pathtransform(path, strip, prefix):
1856 def pathtransform(path, strip, prefix):
1857 '''turn a path from a patch into a path suitable for the repository
1857 '''turn a path from a patch into a path suitable for the repository
1858
1858
1859 prefix, if not empty, is expected to be normalized with a / at the end.
1859 prefix, if not empty, is expected to be normalized with a / at the end.
1860
1860
1861 Returns (stripped components, path in repository).
1861 Returns (stripped components, path in repository).
1862
1862
1863 >>> pathtransform(b'a/b/c', 0, b'')
1863 >>> pathtransform(b'a/b/c', 0, b'')
1864 ('', 'a/b/c')
1864 ('', 'a/b/c')
1865 >>> pathtransform(b' a/b/c ', 0, b'')
1865 >>> pathtransform(b' a/b/c ', 0, b'')
1866 ('', ' a/b/c')
1866 ('', ' a/b/c')
1867 >>> pathtransform(b' a/b/c ', 2, b'')
1867 >>> pathtransform(b' a/b/c ', 2, b'')
1868 ('a/b/', 'c')
1868 ('a/b/', 'c')
1869 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1869 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1870 ('', 'd/e/a/b/c')
1870 ('', 'd/e/a/b/c')
1871 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1871 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1872 ('a//b/', 'd/e/c')
1872 ('a//b/', 'd/e/c')
1873 >>> pathtransform(b'a/b/c', 3, b'')
1873 >>> pathtransform(b'a/b/c', 3, b'')
1874 Traceback (most recent call last):
1874 Traceback (most recent call last):
1875 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1875 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1876 '''
1876 '''
1877 pathlen = len(path)
1877 pathlen = len(path)
1878 i = 0
1878 i = 0
1879 if strip == 0:
1879 if strip == 0:
1880 return b'', prefix + path.rstrip()
1880 return b'', prefix + path.rstrip()
1881 count = strip
1881 count = strip
1882 while count > 0:
1882 while count > 0:
1883 i = path.find(b'/', i)
1883 i = path.find(b'/', i)
1884 if i == -1:
1884 if i == -1:
1885 raise PatchError(
1885 raise PatchError(
1886 _(b"unable to strip away %d of %d dirs from %s")
1886 _(b"unable to strip away %d of %d dirs from %s")
1887 % (count, strip, path)
1887 % (count, strip, path)
1888 )
1888 )
1889 i += 1
1889 i += 1
1890 # consume '//' in the path
1890 # consume '//' in the path
1891 while i < pathlen - 1 and path[i : i + 1] == b'/':
1891 while i < pathlen - 1 and path[i : i + 1] == b'/':
1892 i += 1
1892 i += 1
1893 count -= 1
1893 count -= 1
1894 return path[:i].lstrip(), prefix + path[i:].rstrip()
1894 return path[:i].lstrip(), prefix + path[i:].rstrip()
1895
1895
1896
1896
1897 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1897 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1898 nulla = afile_orig == b"/dev/null"
1898 nulla = afile_orig == b"/dev/null"
1899 nullb = bfile_orig == b"/dev/null"
1899 nullb = bfile_orig == b"/dev/null"
1900 create = nulla and hunk.starta == 0 and hunk.lena == 0
1900 create = nulla and hunk.starta == 0 and hunk.lena == 0
1901 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1901 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1902 abase, afile = pathtransform(afile_orig, strip, prefix)
1902 abase, afile = pathtransform(afile_orig, strip, prefix)
1903 gooda = not nulla and backend.exists(afile)
1903 gooda = not nulla and backend.exists(afile)
1904 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1904 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1905 if afile == bfile:
1905 if afile == bfile:
1906 goodb = gooda
1906 goodb = gooda
1907 else:
1907 else:
1908 goodb = not nullb and backend.exists(bfile)
1908 goodb = not nullb and backend.exists(bfile)
1909 missing = not goodb and not gooda and not create
1909 missing = not goodb and not gooda and not create
1910
1910
1911 # some diff programs apparently produce patches where the afile is
1911 # some diff programs apparently produce patches where the afile is
1912 # not /dev/null, but afile starts with bfile
1912 # not /dev/null, but afile starts with bfile
1913 abasedir = afile[: afile.rfind(b'/') + 1]
1913 abasedir = afile[: afile.rfind(b'/') + 1]
1914 bbasedir = bfile[: bfile.rfind(b'/') + 1]
1914 bbasedir = bfile[: bfile.rfind(b'/') + 1]
1915 if (
1915 if (
1916 missing
1916 missing
1917 and abasedir == bbasedir
1917 and abasedir == bbasedir
1918 and afile.startswith(bfile)
1918 and afile.startswith(bfile)
1919 and hunk.starta == 0
1919 and hunk.starta == 0
1920 and hunk.lena == 0
1920 and hunk.lena == 0
1921 ):
1921 ):
1922 create = True
1922 create = True
1923 missing = False
1923 missing = False
1924
1924
1925 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1925 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1926 # diff is between a file and its backup. In this case, the original
1926 # diff is between a file and its backup. In this case, the original
1927 # file should be patched (see original mpatch code).
1927 # file should be patched (see original mpatch code).
1928 isbackup = abase == bbase and bfile.startswith(afile)
1928 isbackup = abase == bbase and bfile.startswith(afile)
1929 fname = None
1929 fname = None
1930 if not missing:
1930 if not missing:
1931 if gooda and goodb:
1931 if gooda and goodb:
1932 if isbackup:
1932 if isbackup:
1933 fname = afile
1933 fname = afile
1934 else:
1934 else:
1935 fname = bfile
1935 fname = bfile
1936 elif gooda:
1936 elif gooda:
1937 fname = afile
1937 fname = afile
1938
1938
1939 if not fname:
1939 if not fname:
1940 if not nullb:
1940 if not nullb:
1941 if isbackup:
1941 if isbackup:
1942 fname = afile
1942 fname = afile
1943 else:
1943 else:
1944 fname = bfile
1944 fname = bfile
1945 elif not nulla:
1945 elif not nulla:
1946 fname = afile
1946 fname = afile
1947 else:
1947 else:
1948 raise PatchError(_(b"undefined source and destination files"))
1948 raise PatchError(_(b"undefined source and destination files"))
1949
1949
1950 gp = patchmeta(fname)
1950 gp = patchmeta(fname)
1951 if create:
1951 if create:
1952 gp.op = b'ADD'
1952 gp.op = b'ADD'
1953 elif remove:
1953 elif remove:
1954 gp.op = b'DELETE'
1954 gp.op = b'DELETE'
1955 return gp
1955 return gp
1956
1956
1957
1957
1958 def scanpatch(fp):
1958 def scanpatch(fp):
1959 """like patch.iterhunks, but yield different events
1959 """like patch.iterhunks, but yield different events
1960
1960
1961 - ('file', [header_lines + fromfile + tofile])
1961 - ('file', [header_lines + fromfile + tofile])
1962 - ('context', [context_lines])
1962 - ('context', [context_lines])
1963 - ('hunk', [hunk_lines])
1963 - ('hunk', [hunk_lines])
1964 - ('range', (-start,len, +start,len, proc))
1964 - ('range', (-start,len, +start,len, proc))
1965 """
1965 """
1966 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1966 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1967 lr = linereader(fp)
1967 lr = linereader(fp)
1968
1968
1969 def scanwhile(first, p):
1969 def scanwhile(first, p):
1970 """scan lr while predicate holds"""
1970 """scan lr while predicate holds"""
1971 lines = [first]
1971 lines = [first]
1972 for line in iter(lr.readline, b''):
1972 for line in iter(lr.readline, b''):
1973 if p(line):
1973 if p(line):
1974 lines.append(line)
1974 lines.append(line)
1975 else:
1975 else:
1976 lr.push(line)
1976 lr.push(line)
1977 break
1977 break
1978 return lines
1978 return lines
1979
1979
1980 for line in iter(lr.readline, b''):
1980 for line in iter(lr.readline, b''):
1981 if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
1981 if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
1982
1982
1983 def notheader(line):
1983 def notheader(line):
1984 s = line.split(None, 1)
1984 s = line.split(None, 1)
1985 return not s or s[0] not in (b'---', b'diff')
1985 return not s or s[0] not in (b'---', b'diff')
1986
1986
1987 header = scanwhile(line, notheader)
1987 header = scanwhile(line, notheader)
1988 fromfile = lr.readline()
1988 fromfile = lr.readline()
1989 if fromfile.startswith(b'---'):
1989 if fromfile.startswith(b'---'):
1990 tofile = lr.readline()
1990 tofile = lr.readline()
1991 header += [fromfile, tofile]
1991 header += [fromfile, tofile]
1992 else:
1992 else:
1993 lr.push(fromfile)
1993 lr.push(fromfile)
1994 yield b'file', header
1994 yield b'file', header
1995 elif line.startswith(b' '):
1995 elif line.startswith(b' '):
1996 cs = (b' ', b'\\')
1996 cs = (b' ', b'\\')
1997 yield b'context', scanwhile(line, lambda l: l.startswith(cs))
1997 yield b'context', scanwhile(line, lambda l: l.startswith(cs))
1998 elif line.startswith((b'-', b'+')):
1998 elif line.startswith((b'-', b'+')):
1999 cs = (b'-', b'+', b'\\')
1999 cs = (b'-', b'+', b'\\')
2000 yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
2000 yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
2001 else:
2001 else:
2002 m = lines_re.match(line)
2002 m = lines_re.match(line)
2003 if m:
2003 if m:
2004 yield b'range', m.groups()
2004 yield b'range', m.groups()
2005 else:
2005 else:
2006 yield b'other', line
2006 yield b'other', line
2007
2007
2008
2008
2009 def scangitpatch(lr, firstline):
2009 def scangitpatch(lr, firstline):
2010 """
2010 """
2011 Git patches can emit:
2011 Git patches can emit:
2012 - rename a to b
2012 - rename a to b
2013 - change b
2013 - change b
2014 - copy a to c
2014 - copy a to c
2015 - change c
2015 - change c
2016
2016
2017 We cannot apply this sequence as-is, the renamed 'a' could not be
2017 We cannot apply this sequence as-is, the renamed 'a' could not be
2018 found for it would have been renamed already. And we cannot copy
2018 found for it would have been renamed already. And we cannot copy
2019 from 'b' instead because 'b' would have been changed already. So
2019 from 'b' instead because 'b' would have been changed already. So
2020 we scan the git patch for copy and rename commands so we can
2020 we scan the git patch for copy and rename commands so we can
2021 perform the copies ahead of time.
2021 perform the copies ahead of time.
2022 """
2022 """
2023 pos = 0
2023 pos = 0
2024 try:
2024 try:
2025 pos = lr.fp.tell()
2025 pos = lr.fp.tell()
2026 fp = lr.fp
2026 fp = lr.fp
2027 except IOError:
2027 except IOError:
2028 fp = stringio(lr.fp.read())
2028 fp = stringio(lr.fp.read())
2029 gitlr = linereader(fp)
2029 gitlr = linereader(fp)
2030 gitlr.push(firstline)
2030 gitlr.push(firstline)
2031 gitpatches = readgitpatch(gitlr)
2031 gitpatches = readgitpatch(gitlr)
2032 fp.seek(pos)
2032 fp.seek(pos)
2033 return gitpatches
2033 return gitpatches
2034
2034
2035
2035
2036 def iterhunks(fp):
2036 def iterhunks(fp):
2037 """Read a patch and yield the following events:
2037 """Read a patch and yield the following events:
2038 - ("file", afile, bfile, firsthunk): select a new target file.
2038 - ("file", afile, bfile, firsthunk): select a new target file.
2039 - ("hunk", hunk): a new hunk is ready to be applied, follows a
2039 - ("hunk", hunk): a new hunk is ready to be applied, follows a
2040 "file" event.
2040 "file" event.
2041 - ("git", gitchanges): current diff is in git format, gitchanges
2041 - ("git", gitchanges): current diff is in git format, gitchanges
2042 maps filenames to gitpatch records. Unique event.
2042 maps filenames to gitpatch records. Unique event.
2043 """
2043 """
2044 afile = b""
2044 afile = b""
2045 bfile = b""
2045 bfile = b""
2046 state = None
2046 state = None
2047 hunknum = 0
2047 hunknum = 0
2048 emitfile = newfile = False
2048 emitfile = newfile = False
2049 gitpatches = None
2049 gitpatches = None
2050
2050
2051 # our states
2051 # our states
2052 BFILE = 1
2052 BFILE = 1
2053 context = None
2053 context = None
2054 lr = linereader(fp)
2054 lr = linereader(fp)
2055
2055
2056 for x in iter(lr.readline, b''):
2056 for x in iter(lr.readline, b''):
2057 if state == BFILE and (
2057 if state == BFILE and (
2058 (not context and x.startswith(b'@'))
2058 (not context and x.startswith(b'@'))
2059 or (context is not False and x.startswith(b'***************'))
2059 or (context is not False and x.startswith(b'***************'))
2060 or x.startswith(b'GIT binary patch')
2060 or x.startswith(b'GIT binary patch')
2061 ):
2061 ):
2062 gp = None
2062 gp = None
2063 if gitpatches and gitpatches[-1].ispatching(afile, bfile):
2063 if gitpatches and gitpatches[-1].ispatching(afile, bfile):
2064 gp = gitpatches.pop()
2064 gp = gitpatches.pop()
2065 if x.startswith(b'GIT binary patch'):
2065 if x.startswith(b'GIT binary patch'):
2066 h = binhunk(lr, gp.path)
2066 h = binhunk(lr, gp.path)
2067 else:
2067 else:
2068 if context is None and x.startswith(b'***************'):
2068 if context is None and x.startswith(b'***************'):
2069 context = True
2069 context = True
2070 h = hunk(x, hunknum + 1, lr, context)
2070 h = hunk(x, hunknum + 1, lr, context)
2071 hunknum += 1
2071 hunknum += 1
2072 if emitfile:
2072 if emitfile:
2073 emitfile = False
2073 emitfile = False
2074 yield b'file', (afile, bfile, h, gp and gp.copy() or None)
2074 yield b'file', (afile, bfile, h, gp and gp.copy() or None)
2075 yield b'hunk', h
2075 yield b'hunk', h
2076 elif x.startswith(b'diff --git a/'):
2076 elif x.startswith(b'diff --git a/'):
2077 m = gitre.match(x.rstrip(b' \r\n'))
2077 m = gitre.match(x.rstrip(b' \r\n'))
2078 if not m:
2078 if not m:
2079 continue
2079 continue
2080 if gitpatches is None:
2080 if gitpatches is None:
2081 # scan whole input for git metadata
2081 # scan whole input for git metadata
2082 gitpatches = scangitpatch(lr, x)
2082 gitpatches = scangitpatch(lr, x)
2083 yield b'git', [
2083 yield b'git', [
2084 g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
2084 g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
2085 ]
2085 ]
2086 gitpatches.reverse()
2086 gitpatches.reverse()
2087 afile = b'a/' + m.group(1)
2087 afile = b'a/' + m.group(1)
2088 bfile = b'b/' + m.group(2)
2088 bfile = b'b/' + m.group(2)
2089 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
2089 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
2090 gp = gitpatches.pop()
2090 gp = gitpatches.pop()
2091 yield b'file', (
2091 yield b'file', (
2092 b'a/' + gp.path,
2092 b'a/' + gp.path,
2093 b'b/' + gp.path,
2093 b'b/' + gp.path,
2094 None,
2094 None,
2095 gp.copy(),
2095 gp.copy(),
2096 )
2096 )
2097 if not gitpatches:
2097 if not gitpatches:
2098 raise PatchError(
2098 raise PatchError(
2099 _(b'failed to synchronize metadata for "%s"') % afile[2:]
2099 _(b'failed to synchronize metadata for "%s"') % afile[2:]
2100 )
2100 )
2101 newfile = True
2101 newfile = True
2102 elif x.startswith(b'---'):
2102 elif x.startswith(b'---'):
2103 # check for a unified diff
2103 # check for a unified diff
2104 l2 = lr.readline()
2104 l2 = lr.readline()
2105 if not l2.startswith(b'+++'):
2105 if not l2.startswith(b'+++'):
2106 lr.push(l2)
2106 lr.push(l2)
2107 continue
2107 continue
2108 newfile = True
2108 newfile = True
2109 context = False
2109 context = False
2110 afile = parsefilename(x)
2110 afile = parsefilename(x)
2111 bfile = parsefilename(l2)
2111 bfile = parsefilename(l2)
2112 elif x.startswith(b'***'):
2112 elif x.startswith(b'***'):
2113 # check for a context diff
2113 # check for a context diff
2114 l2 = lr.readline()
2114 l2 = lr.readline()
2115 if not l2.startswith(b'---'):
2115 if not l2.startswith(b'---'):
2116 lr.push(l2)
2116 lr.push(l2)
2117 continue
2117 continue
2118 l3 = lr.readline()
2118 l3 = lr.readline()
2119 lr.push(l3)
2119 lr.push(l3)
2120 if not l3.startswith(b"***************"):
2120 if not l3.startswith(b"***************"):
2121 lr.push(l2)
2121 lr.push(l2)
2122 continue
2122 continue
2123 newfile = True
2123 newfile = True
2124 context = True
2124 context = True
2125 afile = parsefilename(x)
2125 afile = parsefilename(x)
2126 bfile = parsefilename(l2)
2126 bfile = parsefilename(l2)
2127
2127
2128 if newfile:
2128 if newfile:
2129 newfile = False
2129 newfile = False
2130 emitfile = True
2130 emitfile = True
2131 state = BFILE
2131 state = BFILE
2132 hunknum = 0
2132 hunknum = 0
2133
2133
2134 while gitpatches:
2134 while gitpatches:
2135 gp = gitpatches.pop()
2135 gp = gitpatches.pop()
2136 yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2136 yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2137
2137
2138
2138
2139 def applybindelta(binchunk, data):
2139 def applybindelta(binchunk, data):
2140 """Apply a binary delta hunk
2140 """Apply a binary delta hunk
2141 The algorithm used is the algorithm from git's patch-delta.c
2141 The algorithm used is the algorithm from git's patch-delta.c
2142 """
2142 """
2143
2143
2144 def deltahead(binchunk):
2144 def deltahead(binchunk):
2145 i = 0
2145 i = 0
2146 for c in pycompat.bytestr(binchunk):
2146 for c in pycompat.bytestr(binchunk):
2147 i += 1
2147 i += 1
2148 if not (ord(c) & 0x80):
2148 if not (ord(c) & 0x80):
2149 return i
2149 return i
2150 return i
2150 return i
2151
2151
2152 out = b""
2152 out = b""
2153 s = deltahead(binchunk)
2153 s = deltahead(binchunk)
2154 binchunk = binchunk[s:]
2154 binchunk = binchunk[s:]
2155 s = deltahead(binchunk)
2155 s = deltahead(binchunk)
2156 binchunk = binchunk[s:]
2156 binchunk = binchunk[s:]
2157 i = 0
2157 i = 0
2158 while i < len(binchunk):
2158 while i < len(binchunk):
2159 cmd = ord(binchunk[i : i + 1])
2159 cmd = ord(binchunk[i : i + 1])
2160 i += 1
2160 i += 1
2161 if cmd & 0x80:
2161 if cmd & 0x80:
2162 offset = 0
2162 offset = 0
2163 size = 0
2163 size = 0
2164 if cmd & 0x01:
2164 if cmd & 0x01:
2165 offset = ord(binchunk[i : i + 1])
2165 offset = ord(binchunk[i : i + 1])
2166 i += 1
2166 i += 1
2167 if cmd & 0x02:
2167 if cmd & 0x02:
2168 offset |= ord(binchunk[i : i + 1]) << 8
2168 offset |= ord(binchunk[i : i + 1]) << 8
2169 i += 1
2169 i += 1
2170 if cmd & 0x04:
2170 if cmd & 0x04:
2171 offset |= ord(binchunk[i : i + 1]) << 16
2171 offset |= ord(binchunk[i : i + 1]) << 16
2172 i += 1
2172 i += 1
2173 if cmd & 0x08:
2173 if cmd & 0x08:
2174 offset |= ord(binchunk[i : i + 1]) << 24
2174 offset |= ord(binchunk[i : i + 1]) << 24
2175 i += 1
2175 i += 1
2176 if cmd & 0x10:
2176 if cmd & 0x10:
2177 size = ord(binchunk[i : i + 1])
2177 size = ord(binchunk[i : i + 1])
2178 i += 1
2178 i += 1
2179 if cmd & 0x20:
2179 if cmd & 0x20:
2180 size |= ord(binchunk[i : i + 1]) << 8
2180 size |= ord(binchunk[i : i + 1]) << 8
2181 i += 1
2181 i += 1
2182 if cmd & 0x40:
2182 if cmd & 0x40:
2183 size |= ord(binchunk[i : i + 1]) << 16
2183 size |= ord(binchunk[i : i + 1]) << 16
2184 i += 1
2184 i += 1
2185 if size == 0:
2185 if size == 0:
2186 size = 0x10000
2186 size = 0x10000
2187 offset_end = offset + size
2187 offset_end = offset + size
2188 out += data[offset:offset_end]
2188 out += data[offset:offset_end]
2189 elif cmd != 0:
2189 elif cmd != 0:
2190 offset_end = i + cmd
2190 offset_end = i + cmd
2191 out += binchunk[i:offset_end]
2191 out += binchunk[i:offset_end]
2192 i += cmd
2192 i += cmd
2193 else:
2193 else:
2194 raise PatchError(_(b'unexpected delta opcode 0'))
2194 raise PatchError(_(b'unexpected delta opcode 0'))
2195 return out
2195 return out
2196
2196
2197
2197
2198 def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
2198 def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
2199 """Reads a patch from fp and tries to apply it.
2199 """Reads a patch from fp and tries to apply it.
2200
2200
2201 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2201 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2202 there was any fuzz.
2202 there was any fuzz.
2203
2203
2204 If 'eolmode' is 'strict', the patch content and patched file are
2204 If 'eolmode' is 'strict', the patch content and patched file are
2205 read in binary mode. Otherwise, line endings are ignored when
2205 read in binary mode. Otherwise, line endings are ignored when
2206 patching then normalized according to 'eolmode'.
2206 patching then normalized according to 'eolmode'.
2207 """
2207 """
2208 return _applydiff(
2208 return _applydiff(
2209 ui,
2209 ui,
2210 fp,
2210 fp,
2211 patchfile,
2211 patchfile,
2212 backend,
2212 backend,
2213 store,
2213 store,
2214 strip=strip,
2214 strip=strip,
2215 prefix=prefix,
2215 prefix=prefix,
2216 eolmode=eolmode,
2216 eolmode=eolmode,
2217 )
2217 )
2218
2218
2219
2219
2220 def _canonprefix(repo, prefix):
2220 def _canonprefix(repo, prefix):
2221 if prefix:
2221 if prefix:
2222 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2222 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2223 if prefix != b'':
2223 if prefix != b'':
2224 prefix += b'/'
2224 prefix += b'/'
2225 return prefix
2225 return prefix
2226
2226
2227
2227
2228 def _applydiff(
2228 def _applydiff(
2229 ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
2229 ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
2230 ):
2230 ):
2231 prefix = _canonprefix(backend.repo, prefix)
2231 prefix = _canonprefix(backend.repo, prefix)
2232
2232
2233 def pstrip(p):
2233 def pstrip(p):
2234 return pathtransform(p, strip - 1, prefix)[1]
2234 return pathtransform(p, strip - 1, prefix)[1]
2235
2235
2236 rejects = 0
2236 rejects = 0
2237 err = 0
2237 err = 0
2238 current_file = None
2238 current_file = None
2239
2239
2240 for state, values in iterhunks(fp):
2240 for state, values in iterhunks(fp):
2241 if state == b'hunk':
2241 if state == b'hunk':
2242 if not current_file:
2242 if not current_file:
2243 continue
2243 continue
2244 ret = current_file.apply(values)
2244 ret = current_file.apply(values)
2245 if ret > 0:
2245 if ret > 0:
2246 err = 1
2246 err = 1
2247 elif state == b'file':
2247 elif state == b'file':
2248 if current_file:
2248 if current_file:
2249 rejects += current_file.close()
2249 rejects += current_file.close()
2250 current_file = None
2250 current_file = None
2251 afile, bfile, first_hunk, gp = values
2251 afile, bfile, first_hunk, gp = values
2252 if gp:
2252 if gp:
2253 gp.path = pstrip(gp.path)
2253 gp.path = pstrip(gp.path)
2254 if gp.oldpath:
2254 if gp.oldpath:
2255 gp.oldpath = pstrip(gp.oldpath)
2255 gp.oldpath = pstrip(gp.oldpath)
2256 else:
2256 else:
2257 gp = makepatchmeta(
2257 gp = makepatchmeta(
2258 backend, afile, bfile, first_hunk, strip, prefix
2258 backend, afile, bfile, first_hunk, strip, prefix
2259 )
2259 )
2260 if gp.op == b'RENAME':
2260 if gp.op == b'RENAME':
2261 backend.unlink(gp.oldpath)
2261 backend.unlink(gp.oldpath)
2262 if not first_hunk:
2262 if not first_hunk:
2263 if gp.op == b'DELETE':
2263 if gp.op == b'DELETE':
2264 backend.unlink(gp.path)
2264 backend.unlink(gp.path)
2265 continue
2265 continue
2266 data, mode = None, None
2266 data, mode = None, None
2267 if gp.op in (b'RENAME', b'COPY'):
2267 if gp.op in (b'RENAME', b'COPY'):
2268 data, mode = store.getfile(gp.oldpath)[:2]
2268 data, mode = store.getfile(gp.oldpath)[:2]
2269 if data is None:
2269 if data is None:
2270 # This means that the old path does not exist
2270 # This means that the old path does not exist
2271 raise PatchError(
2271 raise PatchError(
2272 _(b"source file '%s' does not exist") % gp.oldpath
2272 _(b"source file '%s' does not exist") % gp.oldpath
2273 )
2273 )
2274 if gp.mode:
2274 if gp.mode:
2275 mode = gp.mode
2275 mode = gp.mode
2276 if gp.op == b'ADD':
2276 if gp.op == b'ADD':
2277 # Added files without content have no hunk and
2277 # Added files without content have no hunk and
2278 # must be created
2278 # must be created
2279 data = b''
2279 data = b''
2280 if data or mode:
2280 if data or mode:
2281 if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
2281 if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
2282 gp.path
2282 gp.path
2283 ):
2283 ):
2284 raise PatchError(
2284 raise PatchError(
2285 _(
2285 _(
2286 b"cannot create %s: destination "
2286 b"cannot create %s: destination "
2287 b"already exists"
2287 b"already exists"
2288 )
2288 )
2289 % gp.path
2289 % gp.path
2290 )
2290 )
2291 backend.setfile(gp.path, data, mode, gp.oldpath)
2291 backend.setfile(gp.path, data, mode, gp.oldpath)
2292 continue
2292 continue
2293 try:
2293 try:
2294 current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
2294 current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
2295 except PatchError as inst:
2295 except PatchError as inst:
2296 ui.warn(stringutil.forcebytestr(inst) + b'\n')
2296 ui.warn(stringutil.forcebytestr(inst) + b'\n')
2297 current_file = None
2297 current_file = None
2298 rejects += 1
2298 rejects += 1
2299 continue
2299 continue
2300 elif state == b'git':
2300 elif state == b'git':
2301 for gp in values:
2301 for gp in values:
2302 path = pstrip(gp.oldpath)
2302 path = pstrip(gp.oldpath)
2303 data, mode = backend.getfile(path)
2303 data, mode = backend.getfile(path)
2304 if data is None:
2304 if data is None:
2305 # The error ignored here will trigger a getfile()
2305 # The error ignored here will trigger a getfile()
2306 # error in a place more appropriate for error
2306 # error in a place more appropriate for error
2307 # handling, and will not interrupt the patching
2307 # handling, and will not interrupt the patching
2308 # process.
2308 # process.
2309 pass
2309 pass
2310 else:
2310 else:
2311 store.setfile(path, data, mode)
2311 store.setfile(path, data, mode)
2312 else:
2312 else:
2313 raise error.Abort(_(b'unsupported parser state: %s') % state)
2313 raise error.Abort(_(b'unsupported parser state: %s') % state)
2314
2314
2315 if current_file:
2315 if current_file:
2316 rejects += current_file.close()
2316 rejects += current_file.close()
2317
2317
2318 if rejects:
2318 if rejects:
2319 return -1
2319 return -1
2320 return err
2320 return err
2321
2321
2322
2322
2323 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
2323 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
2324 """use <patcher> to apply <patchname> to the working directory.
2324 """use <patcher> to apply <patchname> to the working directory.
2325 returns whether patch was applied with fuzz factor."""
2325 returns whether patch was applied with fuzz factor."""
2326
2326
2327 fuzz = False
2327 fuzz = False
2328 args = []
2328 args = []
2329 cwd = repo.root
2329 cwd = repo.root
2330 if cwd:
2330 if cwd:
2331 args.append(b'-d %s' % procutil.shellquote(cwd))
2331 args.append(b'-d %s' % procutil.shellquote(cwd))
2332 cmd = b'%s %s -p%d < %s' % (
2332 cmd = b'%s %s -p%d < %s' % (
2333 patcher,
2333 patcher,
2334 b' '.join(args),
2334 b' '.join(args),
2335 strip,
2335 strip,
2336 procutil.shellquote(patchname),
2336 procutil.shellquote(patchname),
2337 )
2337 )
2338 ui.debug(b'Using external patch tool: %s\n' % cmd)
2338 ui.debug(b'Using external patch tool: %s\n' % cmd)
2339 fp = procutil.popen(cmd, b'rb')
2339 fp = procutil.popen(cmd, b'rb')
2340 try:
2340 try:
2341 for line in util.iterfile(fp):
2341 for line in util.iterfile(fp):
2342 line = line.rstrip()
2342 line = line.rstrip()
2343 ui.note(line + b'\n')
2343 ui.note(line + b'\n')
2344 if line.startswith(b'patching file '):
2344 if line.startswith(b'patching file '):
2345 pf = util.parsepatchoutput(line)
2345 pf = util.parsepatchoutput(line)
2346 printed_file = False
2346 printed_file = False
2347 files.add(pf)
2347 files.add(pf)
2348 elif line.find(b'with fuzz') >= 0:
2348 elif line.find(b'with fuzz') >= 0:
2349 fuzz = True
2349 fuzz = True
2350 if not printed_file:
2350 if not printed_file:
2351 ui.warn(pf + b'\n')
2351 ui.warn(pf + b'\n')
2352 printed_file = True
2352 printed_file = True
2353 ui.warn(line + b'\n')
2353 ui.warn(line + b'\n')
2354 elif line.find(b'saving rejects to file') >= 0:
2354 elif line.find(b'saving rejects to file') >= 0:
2355 ui.warn(line + b'\n')
2355 ui.warn(line + b'\n')
2356 elif line.find(b'FAILED') >= 0:
2356 elif line.find(b'FAILED') >= 0:
2357 if not printed_file:
2357 if not printed_file:
2358 ui.warn(pf + b'\n')
2358 ui.warn(pf + b'\n')
2359 printed_file = True
2359 printed_file = True
2360 ui.warn(line + b'\n')
2360 ui.warn(line + b'\n')
2361 finally:
2361 finally:
2362 if files:
2362 if files:
2363 scmutil.marktouched(repo, files, similarity)
2363 scmutil.marktouched(repo, files, similarity)
2364 code = fp.close()
2364 code = fp.close()
2365 if code:
2365 if code:
2366 raise PatchError(
2366 raise PatchError(
2367 _(b"patch command failed: %s") % procutil.explainexit(code)
2367 _(b"patch command failed: %s") % procutil.explainexit(code)
2368 )
2368 )
2369 return fuzz
2369 return fuzz
2370
2370
2371
2371
2372 def patchbackend(
2372 def patchbackend(
2373 ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
2373 ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
2374 ):
2374 ):
2375 if files is None:
2375 if files is None:
2376 files = set()
2376 files = set()
2377 if eolmode is None:
2377 if eolmode is None:
2378 eolmode = ui.config(b'patch', b'eol')
2378 eolmode = ui.config(b'patch', b'eol')
2379 if eolmode.lower() not in eolmodes:
2379 if eolmode.lower() not in eolmodes:
2380 raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
2380 raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
2381 eolmode = eolmode.lower()
2381 eolmode = eolmode.lower()
2382
2382
2383 store = filestore()
2383 store = filestore()
2384 try:
2384 try:
2385 fp = open(patchobj, b'rb')
2385 fp = open(patchobj, b'rb')
2386 except TypeError:
2386 except TypeError:
2387 fp = patchobj
2387 fp = patchobj
2388 try:
2388 try:
2389 ret = applydiff(
2389 ret = applydiff(
2390 ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
2390 ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
2391 )
2391 )
2392 finally:
2392 finally:
2393 if fp != patchobj:
2393 if fp != patchobj:
2394 fp.close()
2394 fp.close()
2395 files.update(backend.close())
2395 files.update(backend.close())
2396 store.close()
2396 store.close()
2397 if ret < 0:
2397 if ret < 0:
2398 raise PatchError(_(b'patch failed to apply'))
2398 raise PatchError(_(b'patch failed to apply'))
2399 return ret > 0
2399 return ret > 0
2400
2400
2401
2401
2402 def internalpatch(
2402 def internalpatch(
2403 ui,
2403 ui,
2404 repo,
2404 repo,
2405 patchobj,
2405 patchobj,
2406 strip,
2406 strip,
2407 prefix=b'',
2407 prefix=b'',
2408 files=None,
2408 files=None,
2409 eolmode=b'strict',
2409 eolmode=b'strict',
2410 similarity=0,
2410 similarity=0,
2411 ):
2411 ):
2412 """use builtin patch to apply <patchobj> to the working directory.
2412 """use builtin patch to apply <patchobj> to the working directory.
2413 returns whether patch was applied with fuzz factor."""
2413 returns whether patch was applied with fuzz factor."""
2414 backend = workingbackend(ui, repo, similarity)
2414 backend = workingbackend(ui, repo, similarity)
2415 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2415 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2416
2416
2417
2417
2418 def patchrepo(
2418 def patchrepo(
2419 ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
2419 ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
2420 ):
2420 ):
2421 backend = repobackend(ui, repo, ctx, store)
2421 backend = repobackend(ui, repo, ctx, store)
2422 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2422 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2423
2423
2424
2424
2425 def patch(
2425 def patch(
2426 ui,
2426 ui,
2427 repo,
2427 repo,
2428 patchname,
2428 patchname,
2429 strip=1,
2429 strip=1,
2430 prefix=b'',
2430 prefix=b'',
2431 files=None,
2431 files=None,
2432 eolmode=b'strict',
2432 eolmode=b'strict',
2433 similarity=0,
2433 similarity=0,
2434 ):
2434 ):
2435 """Apply <patchname> to the working directory.
2435 """Apply <patchname> to the working directory.
2436
2436
2437 'eolmode' specifies how end of lines should be handled. It can be:
2437 'eolmode' specifies how end of lines should be handled. It can be:
2438 - 'strict': inputs are read in binary mode, EOLs are preserved
2438 - 'strict': inputs are read in binary mode, EOLs are preserved
2439 - 'crlf': EOLs are ignored when patching and reset to CRLF
2439 - 'crlf': EOLs are ignored when patching and reset to CRLF
2440 - 'lf': EOLs are ignored when patching and reset to LF
2440 - 'lf': EOLs are ignored when patching and reset to LF
2441 - None: get it from user settings, default to 'strict'
2441 - None: get it from user settings, default to 'strict'
2442 'eolmode' is ignored when using an external patcher program.
2442 'eolmode' is ignored when using an external patcher program.
2443
2443
2444 Returns whether patch was applied with fuzz factor.
2444 Returns whether patch was applied with fuzz factor.
2445 """
2445 """
2446 patcher = ui.config(b'ui', b'patch')
2446 patcher = ui.config(b'ui', b'patch')
2447 if files is None:
2447 if files is None:
2448 files = set()
2448 files = set()
2449 if patcher:
2449 if patcher:
2450 return _externalpatch(
2450 return _externalpatch(
2451 ui, repo, patcher, patchname, strip, files, similarity
2451 ui, repo, patcher, patchname, strip, files, similarity
2452 )
2452 )
2453 return internalpatch(
2453 return internalpatch(
2454 ui, repo, patchname, strip, prefix, files, eolmode, similarity
2454 ui, repo, patchname, strip, prefix, files, eolmode, similarity
2455 )
2455 )
2456
2456
2457
2457
2458 def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
2458 def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
2459 backend = fsbackend(ui, repo.root)
2459 backend = fsbackend(ui, repo.root)
2460 prefix = _canonprefix(repo, prefix)
2460 prefix = _canonprefix(repo, prefix)
2461 with open(patchpath, b'rb') as fp:
2461 with open(patchpath, b'rb') as fp:
2462 changed = set()
2462 changed = set()
2463 for state, values in iterhunks(fp):
2463 for state, values in iterhunks(fp):
2464 if state == b'file':
2464 if state == b'file':
2465 afile, bfile, first_hunk, gp = values
2465 afile, bfile, first_hunk, gp = values
2466 if gp:
2466 if gp:
2467 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2467 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2468 if gp.oldpath:
2468 if gp.oldpath:
2469 gp.oldpath = pathtransform(
2469 gp.oldpath = pathtransform(
2470 gp.oldpath, strip - 1, prefix
2470 gp.oldpath, strip - 1, prefix
2471 )[1]
2471 )[1]
2472 else:
2472 else:
2473 gp = makepatchmeta(
2473 gp = makepatchmeta(
2474 backend, afile, bfile, first_hunk, strip, prefix
2474 backend, afile, bfile, first_hunk, strip, prefix
2475 )
2475 )
2476 changed.add(gp.path)
2476 changed.add(gp.path)
2477 if gp.op == b'RENAME':
2477 if gp.op == b'RENAME':
2478 changed.add(gp.oldpath)
2478 changed.add(gp.oldpath)
2479 elif state not in (b'hunk', b'git'):
2479 elif state not in (b'hunk', b'git'):
2480 raise error.Abort(_(b'unsupported parser state: %s') % state)
2480 raise error.Abort(_(b'unsupported parser state: %s') % state)
2481 return changed
2481 return changed
2482
2482
2483
2483
2484 class GitDiffRequired(Exception):
2484 class GitDiffRequired(Exception):
2485 pass
2485 pass
2486
2486
2487
2487
2488 diffopts = diffutil.diffallopts
2488 diffopts = diffutil.diffallopts
2489 diffallopts = diffutil.diffallopts
2489 diffallopts = diffutil.diffallopts
2490 difffeatureopts = diffutil.difffeatureopts
2490 difffeatureopts = diffutil.difffeatureopts
2491
2491
2492
2492
2493 def diff(
2493 def diff(
2494 repo,
2494 repo,
2495 node1=None,
2495 node1=None,
2496 node2=None,
2496 node2=None,
2497 match=None,
2497 match=None,
2498 changes=None,
2498 changes=None,
2499 opts=None,
2499 opts=None,
2500 losedatafn=None,
2500 losedatafn=None,
2501 pathfn=None,
2501 pathfn=None,
2502 copy=None,
2502 copy=None,
2503 copysourcematch=None,
2503 copysourcematch=None,
2504 hunksfilterfn=None,
2504 hunksfilterfn=None,
2505 ):
2505 ):
2506 '''yields diff of changes to files between two nodes, or node and
2506 '''yields diff of changes to files between two nodes, or node and
2507 working directory.
2507 working directory.
2508
2508
2509 if node1 is None, use first dirstate parent instead.
2509 if node1 is None, use first dirstate parent instead.
2510 if node2 is None, compare node1 with working directory.
2510 if node2 is None, compare node1 with working directory.
2511
2511
2512 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2512 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2513 every time some change cannot be represented with the current
2513 every time some change cannot be represented with the current
2514 patch format. Return False to upgrade to git patch format, True to
2514 patch format. Return False to upgrade to git patch format, True to
2515 accept the loss or raise an exception to abort the diff. It is
2515 accept the loss or raise an exception to abort the diff. It is
2516 called with the name of current file being diffed as 'fn'. If set
2516 called with the name of current file being diffed as 'fn'. If set
2517 to None, patches will always be upgraded to git format when
2517 to None, patches will always be upgraded to git format when
2518 necessary.
2518 necessary.
2519
2519
2520 prefix is a filename prefix that is prepended to all filenames on
2520 prefix is a filename prefix that is prepended to all filenames on
2521 display (used for subrepos).
2521 display (used for subrepos).
2522
2522
2523 relroot, if not empty, must be normalized with a trailing /. Any match
2523 relroot, if not empty, must be normalized with a trailing /. Any match
2524 patterns that fall outside it will be ignored.
2524 patterns that fall outside it will be ignored.
2525
2525
2526 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2526 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2527 information.
2527 information.
2528
2528
2529 if copysourcematch is not None, then copy sources will be filtered by this
2529 if copysourcematch is not None, then copy sources will be filtered by this
2530 matcher
2530 matcher
2531
2531
2532 hunksfilterfn, if not None, should be a function taking a filectx and
2532 hunksfilterfn, if not None, should be a function taking a filectx and
2533 hunks generator that may yield filtered hunks.
2533 hunks generator that may yield filtered hunks.
2534 '''
2534 '''
2535 if not node1 and not node2:
2535 if not node1 and not node2:
2536 node1 = repo.dirstate.p1()
2536 node1 = repo.dirstate.p1()
2537
2537
2538 ctx1 = repo[node1]
2538 ctx1 = repo[node1]
2539 ctx2 = repo[node2]
2539 ctx2 = repo[node2]
2540
2540
2541 for fctx1, fctx2, hdr, hunks in diffhunks(
2541 for fctx1, fctx2, hdr, hunks in diffhunks(
2542 repo,
2542 repo,
2543 ctx1=ctx1,
2543 ctx1=ctx1,
2544 ctx2=ctx2,
2544 ctx2=ctx2,
2545 match=match,
2545 match=match,
2546 changes=changes,
2546 changes=changes,
2547 opts=opts,
2547 opts=opts,
2548 losedatafn=losedatafn,
2548 losedatafn=losedatafn,
2549 pathfn=pathfn,
2549 pathfn=pathfn,
2550 copy=copy,
2550 copy=copy,
2551 copysourcematch=copysourcematch,
2551 copysourcematch=copysourcematch,
2552 ):
2552 ):
2553 if hunksfilterfn is not None:
2553 if hunksfilterfn is not None:
2554 # If the file has been removed, fctx2 is None; but this should
2554 # If the file has been removed, fctx2 is None; but this should
2555 # not occur here since we catch removed files early in
2555 # not occur here since we catch removed files early in
2556 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2556 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2557 assert (
2557 assert (
2558 fctx2 is not None
2558 fctx2 is not None
2559 ), b'fctx2 unexpectly None in diff hunks filtering'
2559 ), b'fctx2 unexpectly None in diff hunks filtering'
2560 hunks = hunksfilterfn(fctx2, hunks)
2560 hunks = hunksfilterfn(fctx2, hunks)
2561 text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
2561 text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
2562 if hdr and (text or len(hdr) > 1):
2562 if hdr and (text or len(hdr) > 1):
2563 yield b'\n'.join(hdr) + b'\n'
2563 yield b'\n'.join(hdr) + b'\n'
2564 if text:
2564 if text:
2565 yield text
2565 yield text
2566
2566
2567
2567
2568 def diffhunks(
2568 def diffhunks(
2569 repo,
2569 repo,
2570 ctx1,
2570 ctx1,
2571 ctx2,
2571 ctx2,
2572 match=None,
2572 match=None,
2573 changes=None,
2573 changes=None,
2574 opts=None,
2574 opts=None,
2575 losedatafn=None,
2575 losedatafn=None,
2576 pathfn=None,
2576 pathfn=None,
2577 copy=None,
2577 copy=None,
2578 copysourcematch=None,
2578 copysourcematch=None,
2579 ):
2579 ):
2580 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2580 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2581 where `header` is a list of diff headers and `hunks` is an iterable of
2581 where `header` is a list of diff headers and `hunks` is an iterable of
2582 (`hunkrange`, `hunklines`) tuples.
2582 (`hunkrange`, `hunklines`) tuples.
2583
2583
2584 See diff() for the meaning of parameters.
2584 See diff() for the meaning of parameters.
2585 """
2585 """
2586
2586
2587 if opts is None:
2587 if opts is None:
2588 opts = mdiff.defaultopts
2588 opts = mdiff.defaultopts
2589
2589
2590 def lrugetfilectx():
2590 def lrugetfilectx():
2591 cache = {}
2591 cache = {}
2592 order = collections.deque()
2592 order = collections.deque()
2593
2593
2594 def getfilectx(f, ctx):
2594 def getfilectx(f, ctx):
2595 fctx = ctx.filectx(f, filelog=cache.get(f))
2595 fctx = ctx.filectx(f, filelog=cache.get(f))
2596 if f not in cache:
2596 if f not in cache:
2597 if len(cache) > 20:
2597 if len(cache) > 20:
2598 del cache[order.popleft()]
2598 del cache[order.popleft()]
2599 cache[f] = fctx.filelog()
2599 cache[f] = fctx.filelog()
2600 else:
2600 else:
2601 order.remove(f)
2601 order.remove(f)
2602 order.append(f)
2602 order.append(f)
2603 return fctx
2603 return fctx
2604
2604
2605 return getfilectx
2605 return getfilectx
2606
2606
2607 getfilectx = lrugetfilectx()
2607 getfilectx = lrugetfilectx()
2608
2608
2609 if not changes:
2609 if not changes:
2610 changes = ctx1.status(ctx2, match=match)
2610 changes = ctx1.status(ctx2, match=match)
2611 if isinstance(changes, list):
2611 if isinstance(changes, list):
2612 modified, added, removed = changes[:3]
2612 modified, added, removed = changes[:3]
2613 else:
2613 else:
2614 modified, added, removed = (
2614 modified, added, removed = (
2615 changes.modified,
2615 changes.modified,
2616 changes.added,
2616 changes.added,
2617 changes.removed,
2617 changes.removed,
2618 )
2618 )
2619
2619
2620 if not modified and not added and not removed:
2620 if not modified and not added and not removed:
2621 return []
2621 return []
2622
2622
2623 if repo.ui.debugflag:
2623 if repo.ui.debugflag:
2624 hexfunc = hex
2624 hexfunc = hex
2625 else:
2625 else:
2626 hexfunc = short
2626 hexfunc = short
2627 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2627 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2628
2628
2629 if copy is None:
2629 if copy is None:
2630 copy = {}
2630 copy = {}
2631 if opts.git or opts.upgrade:
2631 if opts.git or opts.upgrade:
2632 copy = copies.pathcopies(ctx1, ctx2, match=match)
2632 copy = copies.pathcopies(ctx1, ctx2, match=match)
2633
2633
2634 if copysourcematch:
2634 if copysourcematch:
2635 # filter out copies where source side isn't inside the matcher
2635 # filter out copies where source side isn't inside the matcher
2636 # (copies.pathcopies() already filtered out the destination)
2636 # (copies.pathcopies() already filtered out the destination)
2637 copy = {
2637 copy = {
2638 dst: src
2638 dst: src
2639 for dst, src in pycompat.iteritems(copy)
2639 for dst, src in pycompat.iteritems(copy)
2640 if copysourcematch(src)
2640 if copysourcematch(src)
2641 }
2641 }
2642
2642
2643 modifiedset = set(modified)
2643 modifiedset = set(modified)
2644 addedset = set(added)
2644 addedset = set(added)
2645 removedset = set(removed)
2645 removedset = set(removed)
2646 for f in modified:
2646 for f in modified:
2647 if f not in ctx1:
2647 if f not in ctx1:
2648 # Fix up added, since merged-in additions appear as
2648 # Fix up added, since merged-in additions appear as
2649 # modifications during merges
2649 # modifications during merges
2650 modifiedset.remove(f)
2650 modifiedset.remove(f)
2651 addedset.add(f)
2651 addedset.add(f)
2652 for f in removed:
2652 for f in removed:
2653 if f not in ctx1:
2653 if f not in ctx1:
2654 # Merged-in additions that are then removed are reported as removed.
2654 # Merged-in additions that are then removed are reported as removed.
2655 # They are not in ctx1, so We don't want to show them in the diff.
2655 # They are not in ctx1, so We don't want to show them in the diff.
2656 removedset.remove(f)
2656 removedset.remove(f)
2657 modified = sorted(modifiedset)
2657 modified = sorted(modifiedset)
2658 added = sorted(addedset)
2658 added = sorted(addedset)
2659 removed = sorted(removedset)
2659 removed = sorted(removedset)
2660 for dst, src in list(copy.items()):
2660 for dst, src in list(copy.items()):
2661 if src not in ctx1:
2661 if src not in ctx1:
2662 # Files merged in during a merge and then copied/renamed are
2662 # Files merged in during a merge and then copied/renamed are
2663 # reported as copies. We want to show them in the diff as additions.
2663 # reported as copies. We want to show them in the diff as additions.
2664 del copy[dst]
2664 del copy[dst]
2665
2665
2666 prefetchmatch = scmutil.matchfiles(
2666 prefetchmatch = scmutil.matchfiles(
2667 repo, list(modifiedset | addedset | removedset)
2667 repo, list(modifiedset | addedset | removedset)
2668 )
2668 )
2669 revmatches = [
2669 revmatches = [
2670 (ctx1.rev(), prefetchmatch),
2670 (ctx1.rev(), prefetchmatch),
2671 (ctx2.rev(), prefetchmatch),
2671 (ctx2.rev(), prefetchmatch),
2672 ]
2672 ]
2673 scmutil.prefetchfiles(repo, revmatches)
2673 scmutil.prefetchfiles(repo, revmatches)
2674
2674
2675 def difffn(opts, losedata):
2675 def difffn(opts, losedata):
2676 return trydiff(
2676 return trydiff(
2677 repo,
2677 repo,
2678 revs,
2678 revs,
2679 ctx1,
2679 ctx1,
2680 ctx2,
2680 ctx2,
2681 modified,
2681 modified,
2682 added,
2682 added,
2683 removed,
2683 removed,
2684 copy,
2684 copy,
2685 getfilectx,
2685 getfilectx,
2686 opts,
2686 opts,
2687 losedata,
2687 losedata,
2688 pathfn,
2688 pathfn,
2689 )
2689 )
2690
2690
2691 if opts.upgrade and not opts.git:
2691 if opts.upgrade and not opts.git:
2692 try:
2692 try:
2693
2693
2694 def losedata(fn):
2694 def losedata(fn):
2695 if not losedatafn or not losedatafn(fn=fn):
2695 if not losedatafn or not losedatafn(fn=fn):
2696 raise GitDiffRequired
2696 raise GitDiffRequired
2697
2697
2698 # Buffer the whole output until we are sure it can be generated
2698 # Buffer the whole output until we are sure it can be generated
2699 return list(difffn(opts.copy(git=False), losedata))
2699 return list(difffn(opts.copy(git=False), losedata))
2700 except GitDiffRequired:
2700 except GitDiffRequired:
2701 return difffn(opts.copy(git=True), None)
2701 return difffn(opts.copy(git=True), None)
2702 else:
2702 else:
2703 return difffn(opts, None)
2703 return difffn(opts, None)
2704
2704
2705
2705
2706 def diffsinglehunk(hunklines):
2706 def diffsinglehunk(hunklines):
2707 """yield tokens for a list of lines in a single hunk"""
2707 """yield tokens for a list of lines in a single hunk"""
2708 for line in hunklines:
2708 for line in hunklines:
2709 # chomp
2709 # chomp
2710 chompline = line.rstrip(b'\r\n')
2710 chompline = line.rstrip(b'\r\n')
2711 # highlight tabs and trailing whitespace
2711 # highlight tabs and trailing whitespace
2712 stripline = chompline.rstrip()
2712 stripline = chompline.rstrip()
2713 if line.startswith(b'-'):
2713 if line.startswith(b'-'):
2714 label = b'diff.deleted'
2714 label = b'diff.deleted'
2715 elif line.startswith(b'+'):
2715 elif line.startswith(b'+'):
2716 label = b'diff.inserted'
2716 label = b'diff.inserted'
2717 else:
2717 else:
2718 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2718 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2719 for token in tabsplitter.findall(stripline):
2719 for token in tabsplitter.findall(stripline):
2720 if token.startswith(b'\t'):
2720 if token.startswith(b'\t'):
2721 yield (token, b'diff.tab')
2721 yield (token, b'diff.tab')
2722 else:
2722 else:
2723 yield (token, label)
2723 yield (token, label)
2724
2724
2725 if chompline != stripline:
2725 if chompline != stripline:
2726 yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
2726 yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
2727 if chompline != line:
2727 if chompline != line:
2728 yield (line[len(chompline) :], b'')
2728 yield (line[len(chompline) :], b'')
2729
2729
2730
2730
2731 def diffsinglehunkinline(hunklines):
2731 def diffsinglehunkinline(hunklines):
2732 """yield tokens for a list of lines in a single hunk, with inline colors"""
2732 """yield tokens for a list of lines in a single hunk, with inline colors"""
2733 # prepare deleted, and inserted content
2733 # prepare deleted, and inserted content
2734 a = b''
2734 a = b''
2735 b = b''
2735 b = b''
2736 for line in hunklines:
2736 for line in hunklines:
2737 if line[0:1] == b'-':
2737 if line[0:1] == b'-':
2738 a += line[1:]
2738 a += line[1:]
2739 elif line[0:1] == b'+':
2739 elif line[0:1] == b'+':
2740 b += line[1:]
2740 b += line[1:]
2741 else:
2741 else:
2742 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2742 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2743 # fast path: if either side is empty, use diffsinglehunk
2743 # fast path: if either side is empty, use diffsinglehunk
2744 if not a or not b:
2744 if not a or not b:
2745 for t in diffsinglehunk(hunklines):
2745 for t in diffsinglehunk(hunklines):
2746 yield t
2746 yield t
2747 return
2747 return
2748 # re-split the content into words
2748 # re-split the content into words
2749 al = wordsplitter.findall(a)
2749 al = wordsplitter.findall(a)
2750 bl = wordsplitter.findall(b)
2750 bl = wordsplitter.findall(b)
2751 # re-arrange the words to lines since the diff algorithm is line-based
2751 # re-arrange the words to lines since the diff algorithm is line-based
2752 aln = [s if s == b'\n' else s + b'\n' for s in al]
2752 aln = [s if s == b'\n' else s + b'\n' for s in al]
2753 bln = [s if s == b'\n' else s + b'\n' for s in bl]
2753 bln = [s if s == b'\n' else s + b'\n' for s in bl]
2754 an = b''.join(aln)
2754 an = b''.join(aln)
2755 bn = b''.join(bln)
2755 bn = b''.join(bln)
2756 # run the diff algorithm, prepare atokens and btokens
2756 # run the diff algorithm, prepare atokens and btokens
2757 atokens = []
2757 atokens = []
2758 btokens = []
2758 btokens = []
2759 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2759 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2760 for (a1, a2, b1, b2), btype in blocks:
2760 for (a1, a2, b1, b2), btype in blocks:
2761 changed = btype == b'!'
2761 changed = btype == b'!'
2762 for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
2762 for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
2763 atokens.append((changed, token))
2763 atokens.append((changed, token))
2764 for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
2764 for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
2765 btokens.append((changed, token))
2765 btokens.append((changed, token))
2766
2766
2767 # yield deleted tokens, then inserted ones
2767 # yield deleted tokens, then inserted ones
2768 for prefix, label, tokens in [
2768 for prefix, label, tokens in [
2769 (b'-', b'diff.deleted', atokens),
2769 (b'-', b'diff.deleted', atokens),
2770 (b'+', b'diff.inserted', btokens),
2770 (b'+', b'diff.inserted', btokens),
2771 ]:
2771 ]:
2772 nextisnewline = True
2772 nextisnewline = True
2773 for changed, token in tokens:
2773 for changed, token in tokens:
2774 if nextisnewline:
2774 if nextisnewline:
2775 yield (prefix, label)
2775 yield (prefix, label)
2776 nextisnewline = False
2776 nextisnewline = False
2777 # special handling line end
2777 # special handling line end
2778 isendofline = token.endswith(b'\n')
2778 isendofline = token.endswith(b'\n')
2779 if isendofline:
2779 if isendofline:
2780 chomp = token[:-1] # chomp
2780 chomp = token[:-1] # chomp
2781 if chomp.endswith(b'\r'):
2781 if chomp.endswith(b'\r'):
2782 chomp = chomp[:-1]
2782 chomp = chomp[:-1]
2783 endofline = token[len(chomp) :]
2783 endofline = token[len(chomp) :]
2784 token = chomp.rstrip() # detect spaces at the end
2784 token = chomp.rstrip() # detect spaces at the end
2785 endspaces = chomp[len(token) :]
2785 endspaces = chomp[len(token) :]
2786 # scan tabs
2786 # scan tabs
2787 for maybetab in tabsplitter.findall(token):
2787 for maybetab in tabsplitter.findall(token):
2788 if b'\t' == maybetab[0:1]:
2788 if b'\t' == maybetab[0:1]:
2789 currentlabel = b'diff.tab'
2789 currentlabel = b'diff.tab'
2790 else:
2790 else:
2791 if changed:
2791 if changed:
2792 currentlabel = label + b'.changed'
2792 currentlabel = label + b'.changed'
2793 else:
2793 else:
2794 currentlabel = label + b'.unchanged'
2794 currentlabel = label + b'.unchanged'
2795 yield (maybetab, currentlabel)
2795 yield (maybetab, currentlabel)
2796 if isendofline:
2796 if isendofline:
2797 if endspaces:
2797 if endspaces:
2798 yield (endspaces, b'diff.trailingwhitespace')
2798 yield (endspaces, b'diff.trailingwhitespace')
2799 yield (endofline, b'')
2799 yield (endofline, b'')
2800 nextisnewline = True
2800 nextisnewline = True
2801
2801
2802
2802
2803 def difflabel(func, *args, **kw):
2803 def difflabel(func, *args, **kw):
2804 '''yields 2-tuples of (output, label) based on the output of func()'''
2804 '''yields 2-tuples of (output, label) based on the output of func()'''
2805 if kw.get('opts') and kw['opts'].worddiff:
2805 if kw.get('opts') and kw['opts'].worddiff:
2806 dodiffhunk = diffsinglehunkinline
2806 dodiffhunk = diffsinglehunkinline
2807 else:
2807 else:
2808 dodiffhunk = diffsinglehunk
2808 dodiffhunk = diffsinglehunk
2809 headprefixes = [
2809 headprefixes = [
2810 (b'diff', b'diff.diffline'),
2810 (b'diff', b'diff.diffline'),
2811 (b'copy', b'diff.extended'),
2811 (b'copy', b'diff.extended'),
2812 (b'rename', b'diff.extended'),
2812 (b'rename', b'diff.extended'),
2813 (b'old', b'diff.extended'),
2813 (b'old', b'diff.extended'),
2814 (b'new', b'diff.extended'),
2814 (b'new', b'diff.extended'),
2815 (b'deleted', b'diff.extended'),
2815 (b'deleted', b'diff.extended'),
2816 (b'index', b'diff.extended'),
2816 (b'index', b'diff.extended'),
2817 (b'similarity', b'diff.extended'),
2817 (b'similarity', b'diff.extended'),
2818 (b'---', b'diff.file_a'),
2818 (b'---', b'diff.file_a'),
2819 (b'+++', b'diff.file_b'),
2819 (b'+++', b'diff.file_b'),
2820 ]
2820 ]
2821 textprefixes = [
2821 textprefixes = [
2822 (b'@', b'diff.hunk'),
2822 (b'@', b'diff.hunk'),
2823 # - and + are handled by diffsinglehunk
2823 # - and + are handled by diffsinglehunk
2824 ]
2824 ]
2825 head = False
2825 head = False
2826
2826
2827 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2827 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2828 hunkbuffer = []
2828 hunkbuffer = []
2829
2829
2830 def consumehunkbuffer():
2830 def consumehunkbuffer():
2831 if hunkbuffer:
2831 if hunkbuffer:
2832 for token in dodiffhunk(hunkbuffer):
2832 for token in dodiffhunk(hunkbuffer):
2833 yield token
2833 yield token
2834 hunkbuffer[:] = []
2834 hunkbuffer[:] = []
2835
2835
2836 for chunk in func(*args, **kw):
2836 for chunk in func(*args, **kw):
2837 lines = chunk.split(b'\n')
2837 lines = chunk.split(b'\n')
2838 linecount = len(lines)
2838 linecount = len(lines)
2839 for i, line in enumerate(lines):
2839 for i, line in enumerate(lines):
2840 if head:
2840 if head:
2841 if line.startswith(b'@'):
2841 if line.startswith(b'@'):
2842 head = False
2842 head = False
2843 else:
2843 else:
2844 if line and not line.startswith(
2844 if line and not line.startswith(
2845 (b' ', b'+', b'-', b'@', b'\\')
2845 (b' ', b'+', b'-', b'@', b'\\')
2846 ):
2846 ):
2847 head = True
2847 head = True
2848 diffline = False
2848 diffline = False
2849 if not head and line and line.startswith((b'+', b'-')):
2849 if not head and line and line.startswith((b'+', b'-')):
2850 diffline = True
2850 diffline = True
2851
2851
2852 prefixes = textprefixes
2852 prefixes = textprefixes
2853 if head:
2853 if head:
2854 prefixes = headprefixes
2854 prefixes = headprefixes
2855 if diffline:
2855 if diffline:
2856 # buffered
2856 # buffered
2857 bufferedline = line
2857 bufferedline = line
2858 if i + 1 < linecount:
2858 if i + 1 < linecount:
2859 bufferedline += b"\n"
2859 bufferedline += b"\n"
2860 hunkbuffer.append(bufferedline)
2860 hunkbuffer.append(bufferedline)
2861 else:
2861 else:
2862 # unbuffered
2862 # unbuffered
2863 for token in consumehunkbuffer():
2863 for token in consumehunkbuffer():
2864 yield token
2864 yield token
2865 stripline = line.rstrip()
2865 stripline = line.rstrip()
2866 for prefix, label in prefixes:
2866 for prefix, label in prefixes:
2867 if stripline.startswith(prefix):
2867 if stripline.startswith(prefix):
2868 yield (stripline, label)
2868 yield (stripline, label)
2869 if line != stripline:
2869 if line != stripline:
2870 yield (
2870 yield (
2871 line[len(stripline) :],
2871 line[len(stripline) :],
2872 b'diff.trailingwhitespace',
2872 b'diff.trailingwhitespace',
2873 )
2873 )
2874 break
2874 break
2875 else:
2875 else:
2876 yield (line, b'')
2876 yield (line, b'')
2877 if i + 1 < linecount:
2877 if i + 1 < linecount:
2878 yield (b'\n', b'')
2878 yield (b'\n', b'')
2879 for token in consumehunkbuffer():
2879 for token in consumehunkbuffer():
2880 yield token
2880 yield token
2881
2881
2882
2882
2883 def diffui(*args, **kw):
2883 def diffui(*args, **kw):
2884 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2884 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2885 return difflabel(diff, *args, **kw)
2885 return difflabel(diff, *args, **kw)
2886
2886
2887
2887
2888 def _filepairs(modified, added, removed, copy, opts):
2888 def _filepairs(modified, added, removed, copy, opts):
2889 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2889 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2890 before and f2 is the the name after. For added files, f1 will be None,
2890 before and f2 is the the name after. For added files, f1 will be None,
2891 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2891 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2892 or 'rename' (the latter two only if opts.git is set).'''
2892 or 'rename' (the latter two only if opts.git is set).'''
2893 gone = set()
2893 gone = set()
2894
2894
2895 copyto = {v: k for k, v in copy.items()}
2895 copyto = {v: k for k, v in copy.items()}
2896
2896
2897 addedset, removedset = set(added), set(removed)
2897 addedset, removedset = set(added), set(removed)
2898
2898
2899 for f in sorted(modified + added + removed):
2899 for f in sorted(modified + added + removed):
2900 copyop = None
2900 copyop = None
2901 f1, f2 = f, f
2901 f1, f2 = f, f
2902 if f in addedset:
2902 if f in addedset:
2903 f1 = None
2903 f1 = None
2904 if f in copy:
2904 if f in copy:
2905 if opts.git:
2905 if opts.git:
2906 f1 = copy[f]
2906 f1 = copy[f]
2907 if f1 in removedset and f1 not in gone:
2907 if f1 in removedset and f1 not in gone:
2908 copyop = b'rename'
2908 copyop = b'rename'
2909 gone.add(f1)
2909 gone.add(f1)
2910 else:
2910 else:
2911 copyop = b'copy'
2911 copyop = b'copy'
2912 elif f in removedset:
2912 elif f in removedset:
2913 f2 = None
2913 f2 = None
2914 if opts.git:
2914 if opts.git:
2915 # have we already reported a copy above?
2915 # have we already reported a copy above?
2916 if (
2916 if (
2917 f in copyto
2917 f in copyto
2918 and copyto[f] in addedset
2918 and copyto[f] in addedset
2919 and copy[copyto[f]] == f
2919 and copy[copyto[f]] == f
2920 ):
2920 ):
2921 continue
2921 continue
2922 yield f1, f2, copyop
2922 yield f1, f2, copyop
2923
2923
2924
2924
def _gitindex(text):
    """Return the hex SHA-1 git would assign to a blob holding *text*.

    An empty or None *text* hashes as the empty blob.
    """
    data = text or b""
    hasher = hashutil.sha1(b'blob %d\0' % len(data))
    hasher.update(data)
    return hex(hasher.digest())


# Mercurial file flag -> git file mode string
_gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2936
def trydiff(
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    opts,
    losedatafn,
    pathfn,
):
    '''given input data, generate a diff and yield it in blocks

    Yields the (fctx1, fctx2, header, hunks) tuples produced by
    diffcontent(), one per changed file.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    pathfn is applied to every path in the diff output.
    '''

    if opts.noprefix:
        aprefix = bprefix = b''
    else:
        aprefix = b'a/'
        bprefix = b'b/'

    def diffline(f, revs):
        # plain-mode header line, e.g. "diff -r REV1 -r REV2 FILE"
        revinfo = b' '.join([b"-r %s" % rev for rev in revs])
        return b'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    if not pathfn:
        # default to the identity; a plain def rather than assigning a
        # lambda (PEP 8 E731)
        def pathfn(f):
            return f

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # plain diffs cannot express these situations; give the caller
            # a chance to notice (e.g. to abort or warn)
            if (
                binary
                or
                # copy/rename
                f2 in copy
                or
                # empty file creation
                (not f1 and isempty(fctx2))
                or
                # empty file deletion
                (isempty(fctx1) and not f2)
                or
                # create with flags
                (not f1 and flag2)
                or
                # change flags
                (f1 and f2 and flag1 != flag2)
            ):
                losedatafn(f2 or f1)

        path1 = pathfn(f1 or f2)
        path2 = pathfn(f2 or f1)
        header = []
        if opts.git:
            header.append(
                b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
            )
            if not f1:  # added
                header.append(b'new file mode %s' % _gitmode[flag2])
            elif not f2:  # removed
                header.append(b'deleted file mode %s' % _gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = _gitmode[flag1], _gitmode[flag2]
                if mode1 != mode2:
                    header.append(b'old mode %s' % mode1)
                    header.append(b'new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append(b'similarity index %d%%' % sim)
                    header.append(b'%s from %s' % (copyop, path1))
                    header.append(b'%s to %s' % (copyop, path2))
        elif revs:
            header.append(diffline(path1, revs))

        # fctx.is | diffopts                | what to   | is fctx.data()
        # binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes      | no  no  no  *           | summary   | no
        # yes      | no  no  yes *           | base85    | yes
        # yes      | no  yes no  *           | summary   | no
        # yes      | no  yes yes 0           | summary   | no
        # yes      | no  yes yes >0          | summary   | semi [1]
        # yes      | yes *   *   *           | text diff | yes
        # no       | *   *   *   *           | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (
            not opts.git or (opts.git and opts.nobinary and not opts.index)
        ):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0'  # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        data1 = (ctx1, fctx1, path1, flag1, content1, date1)
        data2 = (ctx2, fctx2, path2, flag2, content2, date2)
        yield diffcontent(data1, data2, header, binary, opts)
3079
3080
def diffcontent(data1, data2, header, binary, opts):
    """diffs two versions of a file.

    data1 and data2 are tuples containing:

    * ctx: changeset for the file
    * fctx: file context for that file
    * path: name of the file
    * flag: flags of the file
    * content: full content of the file (may be None, or a placeholder
      sentinel when binary content was not loaded by the caller)
    * date: date of the changeset

    header: the patch header
    binary: whether any of the versions of the file is binary or not
    opts: user passed options

    Returns a (fctx1, fctx2, header, hunks) tuple.

    It exists as a separate function so that extensions like extdiff can wrap
    it and use the file content directly.
    """

    ctx1, fctx1, path1, flag1, content1, date1 = data1
    ctx2, fctx2, path2, flag2, content2, date2 = data2
    if binary and opts.git and not opts.nobinary:
        # git binary diff: a single base85-encoded hunk
        text = mdiff.b85diff(content1, content2)
        if text:
            header.append(
                b'index %s..%s' % (_gitindex(content1), _gitindex(content2))
            )
        hunks = ((None, [text]),)
    else:
        if opts.git and opts.index > 0:
            # pick whichever flag is available for the index line's mode
            flag = flag1
            if flag is None:
                flag = flag2
            header.append(
                b'index %s..%s %s'
                % (
                    _gitindex(content1)[0 : opts.index],
                    _gitindex(content2)[0 : opts.index],
                    _gitmode[flag],
                )
            )

        uheaders, hunks = mdiff.unidiff(
            content1,
            date1,
            content2,
            date2,
            path1,
            path2,
            binary=binary,
            opts=opts,
        )
        header.extend(uheaders)
    return fctx1, fctx2, header, hunks
3107
3136
3108
3137
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples into whole-diff totals.

    *stats* yields (filename, adds, removes, isbinary) tuples; returns
    (maxfile, maxtotal, addtotal, removetotal, binary) where maxfile is
    the widest filename (display columns), maxtotal the largest per-file
    change count, and binary is True if any file was binary.
    """
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
3119
3148
3120
3149
def diffstatdata(lines):
    """Scan diff *lines* (bytes) into per-file statistics.

    Returns a list of (filename, additions, removals, isbinary) tuples,
    one per file diff encountered.
    """
    plaindiffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def flush():
        # record the stats of the file collected so far, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader tracks whether we are still in the header portion of a file
    # diff, so that '---'/'+++' lines are not counted as removals/additions
    inheader = False

    for line in lines:
        if line.startswith(b'diff'):
            flush()
            # a new file diff begins: zero the counters, re-enter the header
            inheader = True
            adds, removes, isbinary = 0, 0, False
        if line.startswith(b'diff --git a/'):
            filename = gitre.search(line).group(2)
        elif line.startswith(b'diff -r'):
            # format: "diff -r ... -r ... filename"
            filename = plaindiffre.search(line).group(1)
        elif line.startswith(b'@@'):
            inheader = False
        elif line.startswith(b'+') and not inheader:
            adds += 1
        elif line.startswith(b'-') and not inheader:
            removes += 1
        elif line.startswith((b'GIT binary patch', b'Binary file')):
            isbinary = True
        elif line.startswith(b'rename from'):
            filename = line[12:]
        elif line.startswith(b'rename to'):
            filename += b' => %s' % line[10:]
    flush()
    return results
3164
3193
3165
3194
def diffstat(lines, width=80):
    """Render diff *lines* as a diffstat histogram, returned as bytes."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the b'Bin' marker
        countwidth = 3
    # keep a minimum histogram width of 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = b'Bin' if isbinary else b'%d' % (adds + removes)
        output.append(
            b' %s%s | %*s %s%s\n'
            % (
                filename,
                b' ' * (maxname - encoding.colwidth(filename)),
                countwidth,
                count,
                b'+' * scale(adds),
                b'-' * scale(removes),
            )
        )

    if stats:
        output.append(
            _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
            % (len(stats), totaladds, totalremoves)
        )

    return b''.join(output)
3212
3241
3213
3242
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    # hoist the histogram patterns out of the per-line loop
    insre = re.compile(br'\++')
    delre = re.compile(br'-+')

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in b'+-':
            # split the trailing +/- graph off the name/count prefix
            name, graph = line.rsplit(b' ', 1)
            yield (name + b' ', b'')
            m = insre.search(graph)
            if m:
                yield (m.group(0), b'diffstat.inserted')
            m = delre.search(graph)
            if m:
                yield (m.group(0), b'diffstat.deleted')
        else:
            yield (line, b'')
        yield (b'\n', b'')
General Comments 0
You need to be logged in to leave comments. Login now