# NOTE(review): this region previously held HTML diff-viewer residue, kept
# here as metadata rather than deleted: commit r43425:a83c9c79 (default),
# "py3: only flush before prompting during interactive patch filtering",
# Denis Laxalde. It is not part of the module proper.
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import email
import errno
import hashlib
import os
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from .pycompat import open
from . import (
    copies,
    diffhelper,
    diffutil,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

stringio = util.stringio

# regexes reused across the module; compiled once at import time
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(
    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
)

PatchError = error.PatchError

# public functions
61
61
def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        # heuristic: "Key: value" with no space in the key looks like a
        # mail/patch header line
        if inheader and line.startswith((b' ', b'\t')):
            # continuation
            return True
        if line.startswith((b' ', b'-', b'+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(b': ', 1)
        return len(l) == 2 and b' ' not in l[0]

    def chunk(lines):
        # wrap accumulated lines back into a file-like object
        return stringio(b''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers (hg export bundles)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith(b'# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators, recursing into each message
        for line in stream:
            if line.startswith(b'From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # delegate to the email parser and yield acceptable MIME parts
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split by hand on header blocks (no MIME headers present)
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object into an iterator
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = [b'content-type']

    if not util.safehasattr(stream, b'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith(b'# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith(b'From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(b':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith(b'--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
191
191
192
192
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [
    (b'Date', b'date'),
    (b'Branch', b'branch'),
    (b'Node ID', b'nodeid'),
]
200
200
201
201
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        # the temporary file only lives for the duration of the context
        tmpfp.close()
        os.unlink(tmpname)
227
227
228
228
def _extract(ui, fileobj, tmpname, tmpfp):
    """Parse patch data from fileobj, writing the diff body to tmpfp.

    Returns the metadata dictionary documented in extract(); the diff
    itself (if any) is written to tmpfp/tmpname and reported under the
    b'filename' key.
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
        br'---[ \t].*?^\+\+\+[ \t]|'
        br'\*\*\*[ \t].*?^---[ \t])',
        re.MULTILINE | re.DOTALL,
    )

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data[b'user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data[b'user']:
        # Not an email, restore parsed headers if any
        subject = (
            b'\n'.join(
                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
            )
            + b'\n'
        )

    # should try to parse msg['Date']
    parents = []

    nodeid = msg[r'X-Mercurial-Node']
    if nodeid:
        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
        ui.debug(b'Node ID: %s\n' % nodeid)

    if subject:
        if subject.startswith(b'[PATCH'):
            # strip the "[PATCH n/m]" prefix from the subject line
            pend = subject.find(b']')
            if pend >= 0:
                subject = subject[pend + 1 :].lstrip()
        subject = re.sub(br'\n[ \t]+', b' ', subject)
        ui.debug(b'Subject: %s\n' % subject)
    if data[b'user']:
        ui.debug(b'From: %s\n' % data[b'user'])
    diffs_seen = 0
    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
    message = b''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug(b'Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug(b'found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            # everything before the diff start is commit message and/or
            # "# HG changeset patch" metadata lines
            for line in payload[: m.start(0)].splitlines():
                if line.startswith(b'# HG changeset patch') and not hgpatch:
                    ui.debug(b'patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith(b'# User '):
                        data[b'user'] = line[7:]
                        ui.debug(b'From: %s\n' % data[b'user'])
                    elif line.startswith(b"# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith(b"# "):
                        for header, key in patchheadermap:
                            prefix = b'# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix) :]
                                ui.debug(b'%s: %s\n' % (header, data[key]))
                    else:
                        hgpatchheader = False
                elif line == b'---':
                    # git-style "---" separator: ignore the trailing diffstat
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write(b'\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith(b'\n'):
                    tmpfp.write(b'\n')
        elif not diffs_seen and message and content_type == b'text/plain':
            message += b'\n' + payload

    if subject and not message.startswith(subject):
        message = b'%s\n%s' % (subject, message)
    data[b'message'] = message
    tmpfp.close()
    if parents:
        data[b'p1'] = parents.pop(0)
        if parents:
            data[b'p2'] = parents.pop(0)

    if diffs_seen:
        data[b'filename'] = tmpname

    return data
340
340
341
341
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = b'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a unix mode integer into the (islink, isexec) pair
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        """Return an independent duplicate of this patchmeta."""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does the "a/" side of a hunk header refer to this file?
        if afile == b'/dev/null':
            return self.op == b'ADD'
        return afile == b'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does the "b/" side of a hunk header refer to this file?
        if bfile == b'/dev/null':
            return self.op == b'DELETE'
        return bfile == b'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return r"<patchmeta %s %r>" % (self.op, self.path)
388
388
389
389
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(b' \r\n')
        if line.startswith(b'diff --git a/'):
            m = gitre.match(line)
            if m:
                # a new file section starts; flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith(b'--- '):
                # hunk data starts: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith(b'rename from '):
                gp.op = b'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith(b'rename to '):
                gp.path = line[10:]
            elif line.startswith(b'copy from '):
                gp.op = b'COPY'
                gp.oldpath = line[10:]
            elif line.startswith(b'copy to '):
                gp.path = line[8:]
            elif line.startswith(b'deleted file'):
                gp.op = b'DELETE'
            elif line.startswith(b'new file mode '):
                gp.op = b'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
433
433
434
434
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []  # pushed-back lines, served before fp

    def push(self, line):
        """Push a line back; it is returned by the next readline()."""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            # serve pushed-back lines first, oldest first
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns an empty line (EOF)
        return iter(self.readline, b'')
454
454
455
455
class abstractbackend(object):
    """Interface for applying patch operations to some target store."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # intentionally a no-op in the base class (no raise)

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
489
489
490
490
class fsbackend(abstractbackend):
    """Backend applying patches to files under basedir through a vfs."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink content is the link target
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file simply has no exec bit; re-raise anything else
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing/deleted file
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only update the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        fname = fname + b".rej"
        self.ui.warn(
            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, fname)
        )
        fp = self.opener(fname, b'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
541
541
542
542
class workingbackend(fsbackend):
    """fsbackend specialized for a repository working directory: tracks
    changed/removed/copied files and updates the dirstate on close()."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files present on disk but unknown to the dirstate
        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Record the accumulated changes in the dirstate; return the
        sorted list of changed files."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
586
586
587
587
class filestore(object):
    """Store file contents in memory, spilling to a temporary directory
    once the combined in-memory size exceeds maxsize bytes."""

    def __init__(self, maxsize=None):
        self.opener = None  # lazily-created vfs for spilled files
        self.files = {}  # fname -> (ondiskname, mode, copied)
        self.created = 0  # counter used to name spilled files
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4 * (2 ** 20)  # default: 4 MiB
        self.size = 0
        self.data = {}  # fname -> (data, mode, copied), in-memory entries

    def setfile(self, fname, data, mode, copied=None):
        # negative maxsize means "never spill to disk"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix=b'hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = b'%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname, or (None, None, None)
        if it was never stored."""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
624
624
625
625
class repobackend(abstractbackend):
    """Patch backend that applies changes against a changectx.

    Nothing is written to the working directory: patched contents are
    staged into ``store`` (a filestore) and the sets of touched file
    names are reported by close().
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx  # context providing the pristine file data
        self.store = store  # filestore receiving patched contents
        self.changed = set()  # files written via setfile()
        self.removed = set()  # files deleted via unlink()
        self.copied = {}  # destination -> copy source

    def _checkknown(self, fname):
        # Patching a file the context does not track is an error here,
        # unlike in working-directory backends.
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return ``(data, (islink, isexec))``, or ``(None, None)`` if
        *fname* is not in the context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), (b'l' in flags, b'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        # Validate the copy source before touching the store.
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: keep the context's current content.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Report every file this patch touched, in any way.
        return self.changed | self.removed
667
667
668
668
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range lines: "--- start[,len] ----" / "*** start[,len] ****"
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# recognized end-of-line handling modes for applying patches
eolmodes = [b'strict', b'crlf', b'lf', b'auto']
673
673
674
674
class patchfile(object):
    """State for applying a sequence of hunks to one file.

    The constructor loads the current content from *backend* (or from
    *store* when the file is the destination of a copy/rename), apply()
    patches one hunk at a time (with fuzz and offset recovery), and
    close() writes the result and any rejects back through the backend.
    """

    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
        # gp describes the operation (path, oldpath, op, mode) for one file
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the existing content
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
        self.remove = gp.op == b'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # copies/renames start from the source content kept in the store
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith(b'\r\n'):
                    self.eol = b'\r\n'
                elif self.lines[0].endswith(b'\n'):
                    self.eol = b'\n'
                if eolmode != b'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith(b'\r\n'):
                            l = l[:-2] + b'\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    b"(use '--prefix' to apply patch relative to the "
                    b"current directory)\n"
                )
            )

        self.hash = {}  # line content -> list of line indices (for fuzzing)
        self.dirty = 0  # set once any hunk actually modified self.lines
        self.offset = 0  # cumulative line-count delta from applied hunks
        self.skew = 0  # displacement of the last non-exact match
        self.rej = []  # hunks that could not be applied
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, restoring EOLs per eolmode."""
        if self.eolmode == b'auto':
            eol = self.eol
        elif self.eolmode == b'crlf':
            eol = b'\r\n'
        else:
            eol = b'\n'

        if self.eolmode != b'strict' and eol and eol != b'\n':
            rawlines = []
            for l in lines:
                if l and l.endswith(b'\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit "patching file X" once; as a warning if *warn* is set."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _(b"patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = [b"--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != b'\n':
                    lines.append(b"\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk.

        Returns 0 on a clean apply, the fuzz amount when the hunk only
        applied fuzzily, and -1 when it was rejected (recorded in
        self.rej).
        """
        if not h.complete():
            raise PatchError(
                _(b"bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _(b"cannot create %s: destination already exists\n")
                    % self.fname
                )
            else:
                self.ui.warn(_(b"file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, all or nothing
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (
            self.eolmode in (b'crlf', b'lf')
            or self.eolmode == b'auto'
            and self.eol
        ):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
906
906
907
907
class header(object):
    """patch header

    Wraps the header lines of one file's diff and exposes the
    predicates used by interactive hunk selection.
    """

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header  # list of raw header lines (bytes)
        self.hunks = []  # populated by the parser with this file's hunks

    def binary(self):
        """True if the header announces binary data (git 'index' line)."""
        return any(h.startswith(b'index ') for h in self.header)

    def pretty(self, fp):
        """Write a condensed, human-readable version of the header to fp."""
        for h in self.header:
            if h.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                break
            if h.startswith(b'---'):
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (
                        len(self.hunks),
                        sum([max(h.added, h.removed) for h in self.hunks]),
                    )
                )
                break
            fp.write(h)

    def write(self, fp):
        """Write the raw header lines to fp."""
        fp.write(b''.join(self.header))

    def allhunks(self):
        """True when the change must be taken whole (binary or deletion)."""
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file path(s) named by the first header line."""
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        """Return the destination path of this change."""
        return self.files()[-1]

    def __repr__(self):
        # py3: __repr__ must return a native str, and joining the native
        # strings produced by repr() with b' ' would raise TypeError
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        """True if this change creates the file (add, copy or rename)."""
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or any(
            self.special_re.match(h) for h in self.header
        )
985
985
986
986
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        def trimcontext(lines, reverse=False):
            # Clamp a context block to maxcontext lines; returns the
            # number of lines dropped and the trimmed list.
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # line numbers shift by however much leading context was trimmed
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        return hash(
            (
                tuple(self.hunk),
                tuple(self.header.files()),
                self.fromline,
                self.proc,
            )
        )

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith(b'+')])
        rem = len([h for h in hunk if h.startswith(b'-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        hunk = [b'%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            hunk,
            self.after,
        )

    def write(self, fp):
        """Write this hunk in unified diff format to fp."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                fromlen,
                self.toline,
                tolen,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        # py3: %r is not a valid conversion for bytes formatting (PEP 461)
        # and __repr__ must return a native str
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1094
1094
1095
1095
def getmessages():
    """Return the prompt/help message table for interactive filtering.

    Keyed first by context (b'multiple' when a file has several hunks,
    b'single' when it has one, b'help' for promptchoice response lists)
    and then by operation name (b'apply', b'discard', b'keep',
    b'record').  The b'help' entries use ui.promptchoice's
    '$$'-separated choice syntax.
    """
    return {
        b'multiple': {
            b'apply': _(b"apply change %d/%d to '%s'?"),
            b'discard': _(b"discard change %d/%d to '%s'?"),
            b'keep': _(b"keep change %d/%d to '%s'?"),
            b'record': _(b"record change %d/%d to '%s'?"),
        },
        b'single': {
            b'apply': _(b"apply this change to '%s'?"),
            b'discard': _(b"discard this change to '%s'?"),
            b'keep': _(b"keep this change to '%s'?"),
            b'record': _(b"record this change to '%s'?"),
        },
        b'help': {
            b'apply': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, apply this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Apply remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Apply &all changes to all remaining files'
                b'$$ &Quit, applying no changes'
                b'$$ &? (display help)'
            ),
            b'discard': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, discard this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Discard remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Discard &all changes to all remaining files'
                b'$$ &Quit, discarding no changes'
                b'$$ &? (display help)'
            ),
            b'keep': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, keep this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Keep remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Keep &all changes to all remaining files'
                b'$$ &Quit, keeping all changes'
                b'$$ &? (display help)'
            ),
            b'record': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, record this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Record remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Record &all changes to all remaining files'
                b'$$ &Quit, recording no changes'
                b'$$ &? (display help)'
            ),
        },
    }
1161
1161
1162
1162
1163 def filterpatch(ui, headers, match, operation=None):
1163 def filterpatch(ui, headers, match, operation=None):
1164 """Interactively filter patch chunks into applied-only chunks"""
1164 """Interactively filter patch chunks into applied-only chunks"""
1165 messages = getmessages()
1165 messages = getmessages()
1166
1166
1167 if operation is None:
1167 if operation is None:
1168 operation = b'record'
1168 operation = b'record'
1169
1169
1170 def prompt(skipfile, skipall, query, chunk):
1170 def prompt(skipfile, skipall, query, chunk):
1171 """prompt query, and process base inputs
1171 """prompt query, and process base inputs
1172
1172
1173 - y/n for the rest of file
1173 - y/n for the rest of file
1174 - y/n for the rest
1174 - y/n for the rest
1175 - ? (help)
1175 - ? (help)
1176 - q (quit)
1176 - q (quit)
1177
1177
1178 Return True/False and possibly updated skipfile and skipall.
1178 Return True/False and possibly updated skipfile and skipall.
1179 """
1179 """
1180 newpatches = None
1180 newpatches = None
1181 if skipall is not None:
1181 if skipall is not None:
1182 return skipall, skipfile, skipall, newpatches
1182 return skipall, skipfile, skipall, newpatches
1183 if skipfile is not None:
1183 if skipfile is not None:
1184 return skipfile, skipfile, skipall, newpatches
1184 return skipfile, skipfile, skipall, newpatches
1185 while True:
1185 while True:
1186 ui.flush()
1186 resps = messages[b'help'][operation]
1187 resps = messages[b'help'][operation]
1187 # IMPORTANT: keep the last line of this prompt short (<40 english
1188 # IMPORTANT: keep the last line of this prompt short (<40 english
1188 # chars is a good target) because of issue6158.
1189 # chars is a good target) because of issue6158.
1189 r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
1190 r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
1190 ui.write(b"\n")
1191 ui.write(b"\n")
1191 ui.flush()
1192 if r == 8: # ?
1192 if r == 8: # ?
1193 for c, t in ui.extractchoices(resps)[1]:
1193 for c, t in ui.extractchoices(resps)[1]:
1194 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
1194 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
1195 ui.flush()
1196 continue
1195 continue
1197 elif r == 0: # yes
1196 elif r == 0: # yes
1198 ret = True
1197 ret = True
1199 elif r == 1: # no
1198 elif r == 1: # no
1200 ret = False
1199 ret = False
1201 elif r == 2: # Edit patch
1200 elif r == 2: # Edit patch
1202 if chunk is None:
1201 if chunk is None:
1203 ui.write(_(b'cannot edit patch for whole file'))
1202 ui.write(_(b'cannot edit patch for whole file'))
1204 ui.write(b"\n")
1203 ui.write(b"\n")
1205 ui.flush()
1206 continue
1204 continue
1207 if chunk.header.binary():
1205 if chunk.header.binary():
1208 ui.write(_(b'cannot edit patch for binary file'))
1206 ui.write(_(b'cannot edit patch for binary file'))
1209 ui.write(b"\n")
1207 ui.write(b"\n")
1210 ui.flush()
1211 continue
1208 continue
1212 # Patch comment based on the Git one (based on comment at end of
1209 # Patch comment based on the Git one (based on comment at end of
1213 # https://mercurial-scm.org/wiki/RecordExtension)
1210 # https://mercurial-scm.org/wiki/RecordExtension)
1214 phelp = b'---' + _(
1211 phelp = b'---' + _(
1215 """
1212 """
1216 To remove '-' lines, make them ' ' lines (context).
1213 To remove '-' lines, make them ' ' lines (context).
1217 To remove '+' lines, delete them.
1214 To remove '+' lines, delete them.
1218 Lines starting with # will be removed from the patch.
1215 Lines starting with # will be removed from the patch.
1219
1216
1220 If the patch applies cleanly, the edited hunk will immediately be
1217 If the patch applies cleanly, the edited hunk will immediately be
1221 added to the record list. If it does not apply cleanly, a rejects
1218 added to the record list. If it does not apply cleanly, a rejects
1222 file will be generated: you can use that when you try again. If
1219 file will be generated: you can use that when you try again. If
1223 all lines of the hunk are removed, then the edit is aborted and
1220 all lines of the hunk are removed, then the edit is aborted and
1224 the hunk is left unchanged.
1221 the hunk is left unchanged.
1225 """
1222 """
1226 )
1223 )
1227 (patchfd, patchfn) = pycompat.mkstemp(
1224 (patchfd, patchfn) = pycompat.mkstemp(
1228 prefix=b"hg-editor-", suffix=b".diff"
1225 prefix=b"hg-editor-", suffix=b".diff"
1229 )
1226 )
1230 ncpatchfp = None
1227 ncpatchfp = None
1231 try:
1228 try:
1232 # Write the initial patch
1229 # Write the initial patch
1233 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1230 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1234 chunk.header.write(f)
1231 chunk.header.write(f)
1235 chunk.write(f)
1232 chunk.write(f)
1236 f.write(
1233 f.write(
1237 b''.join(
1234 b''.join(
1238 [b'# ' + i + b'\n' for i in phelp.splitlines()]
1235 [b'# ' + i + b'\n' for i in phelp.splitlines()]
1239 )
1236 )
1240 )
1237 )
1241 f.close()
1238 f.close()
1242 # Start the editor and wait for it to complete
1239 # Start the editor and wait for it to complete
1243 editor = ui.geteditor()
1240 editor = ui.geteditor()
1244 ret = ui.system(
1241 ret = ui.system(
1245 b"%s \"%s\"" % (editor, patchfn),
1242 b"%s \"%s\"" % (editor, patchfn),
1246 environ={b'HGUSER': ui.username()},
1243 environ={b'HGUSER': ui.username()},
1247 blockedtag=b'filterpatch',
1244 blockedtag=b'filterpatch',
1248 )
1245 )
1249 if ret != 0:
1246 if ret != 0:
1250 ui.warn(_(b"editor exited with exit code %d\n") % ret)
1247 ui.warn(_(b"editor exited with exit code %d\n") % ret)
1251 continue
1248 continue
1252 # Remove comment lines
1249 # Remove comment lines
1253 patchfp = open(patchfn, r'rb')
1250 patchfp = open(patchfn, r'rb')
1254 ncpatchfp = stringio()
1251 ncpatchfp = stringio()
1255 for line in util.iterfile(patchfp):
1252 for line in util.iterfile(patchfp):
1256 line = util.fromnativeeol(line)
1253 line = util.fromnativeeol(line)
1257 if not line.startswith(b'#'):
1254 if not line.startswith(b'#'):
1258 ncpatchfp.write(line)
1255 ncpatchfp.write(line)
1259 patchfp.close()
1256 patchfp.close()
1260 ncpatchfp.seek(0)
1257 ncpatchfp.seek(0)
1261 newpatches = parsepatch(ncpatchfp)
1258 newpatches = parsepatch(ncpatchfp)
1262 finally:
1259 finally:
1263 os.unlink(patchfn)
1260 os.unlink(patchfn)
1264 del ncpatchfp
1261 del ncpatchfp
1265 # Signal that the chunk shouldn't be applied as-is, but
1262 # Signal that the chunk shouldn't be applied as-is, but
1266 # provide the new patch to be used instead.
1263 # provide the new patch to be used instead.
1267 ret = False
1264 ret = False
1268 elif r == 3: # Skip
1265 elif r == 3: # Skip
1269 ret = skipfile = False
1266 ret = skipfile = False
1270 elif r == 4: # file (Record remaining)
1267 elif r == 4: # file (Record remaining)
1271 ret = skipfile = True
1268 ret = skipfile = True
1272 elif r == 5: # done, skip remaining
1269 elif r == 5: # done, skip remaining
1273 ret = skipall = False
1270 ret = skipall = False
1274 elif r == 6: # all
1271 elif r == 6: # all
1275 ret = skipall = True
1272 ret = skipall = True
1276 elif r == 7: # quit
1273 elif r == 7: # quit
1277 raise error.Abort(_(b'user quit'))
1274 raise error.Abort(_(b'user quit'))
1278 return ret, skipfile, skipall, newpatches
1275 return ret, skipfile, skipall, newpatches
1279
1276
1280 seen = set()
1277 seen = set()
1281 applied = {} # 'filename' -> [] of chunks
1278 applied = {} # 'filename' -> [] of chunks
1282 skipfile, skipall = None, None
1279 skipfile, skipall = None, None
1283 pos, total = 1, sum(len(h.hunks) for h in headers)
1280 pos, total = 1, sum(len(h.hunks) for h in headers)
1284 for h in headers:
1281 for h in headers:
1285 pos += len(h.hunks)
1282 pos += len(h.hunks)
1286 skipfile = None
1283 skipfile = None
1287 fixoffset = 0
1284 fixoffset = 0
1288 hdr = b''.join(h.header)
1285 hdr = b''.join(h.header)
1289 if hdr in seen:
1286 if hdr in seen:
1290 continue
1287 continue
1291 seen.add(hdr)
1288 seen.add(hdr)
1292 if skipall is None:
1289 if skipall is None:
1293 h.pretty(ui)
1290 h.pretty(ui)
1294 files = h.files()
1291 files = h.files()
1295 msg = _(b'examine changes to %s?') % _(b' and ').join(
1292 msg = _(b'examine changes to %s?') % _(b' and ').join(
1296 b"'%s'" % f for f in files
1293 b"'%s'" % f for f in files
1297 )
1294 )
1298 if all(match.exact(f) for f in files):
1295 if all(match.exact(f) for f in files):
1299 r, skipall, np = True, None, None
1296 r, skipall, np = True, None, None
1300 else:
1297 else:
1301 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1298 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1302 if not r:
1299 if not r:
1303 continue
1300 continue
1304 applied[h.filename()] = [h]
1301 applied[h.filename()] = [h]
1305 if h.allhunks():
1302 if h.allhunks():
1306 applied[h.filename()] += h.hunks
1303 applied[h.filename()] += h.hunks
1307 continue
1304 continue
1308 for i, chunk in enumerate(h.hunks):
1305 for i, chunk in enumerate(h.hunks):
1309 if skipfile is None and skipall is None:
1306 if skipfile is None and skipall is None:
1310 chunk.pretty(ui)
1307 chunk.pretty(ui)
1311 ui.flush()
1312 if total == 1:
1308 if total == 1:
1313 msg = messages[b'single'][operation] % chunk.filename()
1309 msg = messages[b'single'][operation] % chunk.filename()
1314 else:
1310 else:
1315 idx = pos - len(h.hunks) + i
1311 idx = pos - len(h.hunks) + i
1316 msg = messages[b'multiple'][operation] % (
1312 msg = messages[b'multiple'][operation] % (
1317 idx,
1313 idx,
1318 total,
1314 total,
1319 chunk.filename(),
1315 chunk.filename(),
1320 )
1316 )
1321 r, skipfile, skipall, newpatches = prompt(
1317 r, skipfile, skipall, newpatches = prompt(
1322 skipfile, skipall, msg, chunk
1318 skipfile, skipall, msg, chunk
1323 )
1319 )
1324 if r:
1320 if r:
1325 if fixoffset:
1321 if fixoffset:
1326 chunk = copy.copy(chunk)
1322 chunk = copy.copy(chunk)
1327 chunk.toline += fixoffset
1323 chunk.toline += fixoffset
1328 applied[chunk.filename()].append(chunk)
1324 applied[chunk.filename()].append(chunk)
1329 elif newpatches is not None:
1325 elif newpatches is not None:
1330 for newpatch in newpatches:
1326 for newpatch in newpatches:
1331 for newhunk in newpatch.hunks:
1327 for newhunk in newpatch.hunks:
1332 if fixoffset:
1328 if fixoffset:
1333 newhunk.toline += fixoffset
1329 newhunk.toline += fixoffset
1334 applied[newhunk.filename()].append(newhunk)
1330 applied[newhunk.filename()].append(newhunk)
1335 else:
1331 else:
1336 fixoffset += chunk.removed - chunk.added
1332 fixoffset += chunk.removed - chunk.added
1337 return (
1333 return (
1338 sum(
1334 sum(
1339 [
1335 [
1340 h
1336 h
1341 for h in pycompat.itervalues(applied)
1337 for h in pycompat.itervalues(applied)
1342 if h[0].special() or len(h) > 1
1338 if h[0].special() or len(h) > 1
1343 ],
1339 ],
1344 [],
1340 [],
1345 ),
1341 ),
1346 {},
1342 {},
1347 )
1343 )
1348
1344
1349
1345
1350 class hunk(object):
1346 class hunk(object):
1351 def __init__(self, desc, num, lr, context):
1347 def __init__(self, desc, num, lr, context):
1352 self.number = num
1348 self.number = num
1353 self.desc = desc
1349 self.desc = desc
1354 self.hunk = [desc]
1350 self.hunk = [desc]
1355 self.a = []
1351 self.a = []
1356 self.b = []
1352 self.b = []
1357 self.starta = self.lena = None
1353 self.starta = self.lena = None
1358 self.startb = self.lenb = None
1354 self.startb = self.lenb = None
1359 if lr is not None:
1355 if lr is not None:
1360 if context:
1356 if context:
1361 self.read_context_hunk(lr)
1357 self.read_context_hunk(lr)
1362 else:
1358 else:
1363 self.read_unified_hunk(lr)
1359 self.read_unified_hunk(lr)
1364
1360
1365 def getnormalized(self):
1361 def getnormalized(self):
1366 """Return a copy with line endings normalized to LF."""
1362 """Return a copy with line endings normalized to LF."""
1367
1363
1368 def normalize(lines):
1364 def normalize(lines):
1369 nlines = []
1365 nlines = []
1370 for line in lines:
1366 for line in lines:
1371 if line.endswith(b'\r\n'):
1367 if line.endswith(b'\r\n'):
1372 line = line[:-2] + b'\n'
1368 line = line[:-2] + b'\n'
1373 nlines.append(line)
1369 nlines.append(line)
1374 return nlines
1370 return nlines
1375
1371
1376 # Dummy object, it is rebuilt manually
1372 # Dummy object, it is rebuilt manually
1377 nh = hunk(self.desc, self.number, None, None)
1373 nh = hunk(self.desc, self.number, None, None)
1378 nh.number = self.number
1374 nh.number = self.number
1379 nh.desc = self.desc
1375 nh.desc = self.desc
1380 nh.hunk = self.hunk
1376 nh.hunk = self.hunk
1381 nh.a = normalize(self.a)
1377 nh.a = normalize(self.a)
1382 nh.b = normalize(self.b)
1378 nh.b = normalize(self.b)
1383 nh.starta = self.starta
1379 nh.starta = self.starta
1384 nh.startb = self.startb
1380 nh.startb = self.startb
1385 nh.lena = self.lena
1381 nh.lena = self.lena
1386 nh.lenb = self.lenb
1382 nh.lenb = self.lenb
1387 return nh
1383 return nh
1388
1384
1389 def read_unified_hunk(self, lr):
1385 def read_unified_hunk(self, lr):
1390 m = unidesc.match(self.desc)
1386 m = unidesc.match(self.desc)
1391 if not m:
1387 if not m:
1392 raise PatchError(_(b"bad hunk #%d") % self.number)
1388 raise PatchError(_(b"bad hunk #%d") % self.number)
1393 self.starta, self.lena, self.startb, self.lenb = m.groups()
1389 self.starta, self.lena, self.startb, self.lenb = m.groups()
1394 if self.lena is None:
1390 if self.lena is None:
1395 self.lena = 1
1391 self.lena = 1
1396 else:
1392 else:
1397 self.lena = int(self.lena)
1393 self.lena = int(self.lena)
1398 if self.lenb is None:
1394 if self.lenb is None:
1399 self.lenb = 1
1395 self.lenb = 1
1400 else:
1396 else:
1401 self.lenb = int(self.lenb)
1397 self.lenb = int(self.lenb)
1402 self.starta = int(self.starta)
1398 self.starta = int(self.starta)
1403 self.startb = int(self.startb)
1399 self.startb = int(self.startb)
1404 try:
1400 try:
1405 diffhelper.addlines(
1401 diffhelper.addlines(
1406 lr, self.hunk, self.lena, self.lenb, self.a, self.b
1402 lr, self.hunk, self.lena, self.lenb, self.a, self.b
1407 )
1403 )
1408 except error.ParseError as e:
1404 except error.ParseError as e:
1409 raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
1405 raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
1410 # if we hit eof before finishing out the hunk, the last line will
1406 # if we hit eof before finishing out the hunk, the last line will
1411 # be zero length. Lets try to fix it up.
1407 # be zero length. Lets try to fix it up.
1412 while len(self.hunk[-1]) == 0:
1408 while len(self.hunk[-1]) == 0:
1413 del self.hunk[-1]
1409 del self.hunk[-1]
1414 del self.a[-1]
1410 del self.a[-1]
1415 del self.b[-1]
1411 del self.b[-1]
1416 self.lena -= 1
1412 self.lena -= 1
1417 self.lenb -= 1
1413 self.lenb -= 1
1418 self._fixnewline(lr)
1414 self._fixnewline(lr)
1419
1415
1420 def read_context_hunk(self, lr):
1416 def read_context_hunk(self, lr):
1421 self.desc = lr.readline()
1417 self.desc = lr.readline()
1422 m = contextdesc.match(self.desc)
1418 m = contextdesc.match(self.desc)
1423 if not m:
1419 if not m:
1424 raise PatchError(_(b"bad hunk #%d") % self.number)
1420 raise PatchError(_(b"bad hunk #%d") % self.number)
1425 self.starta, aend = m.groups()
1421 self.starta, aend = m.groups()
1426 self.starta = int(self.starta)
1422 self.starta = int(self.starta)
1427 if aend is None:
1423 if aend is None:
1428 aend = self.starta
1424 aend = self.starta
1429 self.lena = int(aend) - self.starta
1425 self.lena = int(aend) - self.starta
1430 if self.starta:
1426 if self.starta:
1431 self.lena += 1
1427 self.lena += 1
1432 for x in pycompat.xrange(self.lena):
1428 for x in pycompat.xrange(self.lena):
1433 l = lr.readline()
1429 l = lr.readline()
1434 if l.startswith(b'---'):
1430 if l.startswith(b'---'):
1435 # lines addition, old block is empty
1431 # lines addition, old block is empty
1436 lr.push(l)
1432 lr.push(l)
1437 break
1433 break
1438 s = l[2:]
1434 s = l[2:]
1439 if l.startswith(b'- ') or l.startswith(b'! '):
1435 if l.startswith(b'- ') or l.startswith(b'! '):
1440 u = b'-' + s
1436 u = b'-' + s
1441 elif l.startswith(b' '):
1437 elif l.startswith(b' '):
1442 u = b' ' + s
1438 u = b' ' + s
1443 else:
1439 else:
1444 raise PatchError(
1440 raise PatchError(
1445 _(b"bad hunk #%d old text line %d") % (self.number, x)
1441 _(b"bad hunk #%d old text line %d") % (self.number, x)
1446 )
1442 )
1447 self.a.append(u)
1443 self.a.append(u)
1448 self.hunk.append(u)
1444 self.hunk.append(u)
1449
1445
1450 l = lr.readline()
1446 l = lr.readline()
1451 if l.startswith(br'\ '):
1447 if l.startswith(br'\ '):
1452 s = self.a[-1][:-1]
1448 s = self.a[-1][:-1]
1453 self.a[-1] = s
1449 self.a[-1] = s
1454 self.hunk[-1] = s
1450 self.hunk[-1] = s
1455 l = lr.readline()
1451 l = lr.readline()
1456 m = contextdesc.match(l)
1452 m = contextdesc.match(l)
1457 if not m:
1453 if not m:
1458 raise PatchError(_(b"bad hunk #%d") % self.number)
1454 raise PatchError(_(b"bad hunk #%d") % self.number)
1459 self.startb, bend = m.groups()
1455 self.startb, bend = m.groups()
1460 self.startb = int(self.startb)
1456 self.startb = int(self.startb)
1461 if bend is None:
1457 if bend is None:
1462 bend = self.startb
1458 bend = self.startb
1463 self.lenb = int(bend) - self.startb
1459 self.lenb = int(bend) - self.startb
1464 if self.startb:
1460 if self.startb:
1465 self.lenb += 1
1461 self.lenb += 1
1466 hunki = 1
1462 hunki = 1
1467 for x in pycompat.xrange(self.lenb):
1463 for x in pycompat.xrange(self.lenb):
1468 l = lr.readline()
1464 l = lr.readline()
1469 if l.startswith(br'\ '):
1465 if l.startswith(br'\ '):
1470 # XXX: the only way to hit this is with an invalid line range.
1466 # XXX: the only way to hit this is with an invalid line range.
1471 # The no-eol marker is not counted in the line range, but I
1467 # The no-eol marker is not counted in the line range, but I
1472 # guess there are diff(1) out there which behave differently.
1468 # guess there are diff(1) out there which behave differently.
1473 s = self.b[-1][:-1]
1469 s = self.b[-1][:-1]
1474 self.b[-1] = s
1470 self.b[-1] = s
1475 self.hunk[hunki - 1] = s
1471 self.hunk[hunki - 1] = s
1476 continue
1472 continue
1477 if not l:
1473 if not l:
1478 # line deletions, new block is empty and we hit EOF
1474 # line deletions, new block is empty and we hit EOF
1479 lr.push(l)
1475 lr.push(l)
1480 break
1476 break
1481 s = l[2:]
1477 s = l[2:]
1482 if l.startswith(b'+ ') or l.startswith(b'! '):
1478 if l.startswith(b'+ ') or l.startswith(b'! '):
1483 u = b'+' + s
1479 u = b'+' + s
1484 elif l.startswith(b' '):
1480 elif l.startswith(b' '):
1485 u = b' ' + s
1481 u = b' ' + s
1486 elif len(self.b) == 0:
1482 elif len(self.b) == 0:
1487 # line deletions, new block is empty
1483 # line deletions, new block is empty
1488 lr.push(l)
1484 lr.push(l)
1489 break
1485 break
1490 else:
1486 else:
1491 raise PatchError(
1487 raise PatchError(
1492 _(b"bad hunk #%d old text line %d") % (self.number, x)
1488 _(b"bad hunk #%d old text line %d") % (self.number, x)
1493 )
1489 )
1494 self.b.append(s)
1490 self.b.append(s)
1495 while True:
1491 while True:
1496 if hunki >= len(self.hunk):
1492 if hunki >= len(self.hunk):
1497 h = b""
1493 h = b""
1498 else:
1494 else:
1499 h = self.hunk[hunki]
1495 h = self.hunk[hunki]
1500 hunki += 1
1496 hunki += 1
1501 if h == u:
1497 if h == u:
1502 break
1498 break
1503 elif h.startswith(b'-'):
1499 elif h.startswith(b'-'):
1504 continue
1500 continue
1505 else:
1501 else:
1506 self.hunk.insert(hunki - 1, u)
1502 self.hunk.insert(hunki - 1, u)
1507 break
1503 break
1508
1504
1509 if not self.a:
1505 if not self.a:
1510 # this happens when lines were only added to the hunk
1506 # this happens when lines were only added to the hunk
1511 for x in self.hunk:
1507 for x in self.hunk:
1512 if x.startswith(b'-') or x.startswith(b' '):
1508 if x.startswith(b'-') or x.startswith(b' '):
1513 self.a.append(x)
1509 self.a.append(x)
1514 if not self.b:
1510 if not self.b:
1515 # this happens when lines were only deleted from the hunk
1511 # this happens when lines were only deleted from the hunk
1516 for x in self.hunk:
1512 for x in self.hunk:
1517 if x.startswith(b'+') or x.startswith(b' '):
1513 if x.startswith(b'+') or x.startswith(b' '):
1518 self.b.append(x[1:])
1514 self.b.append(x[1:])
1519 # @@ -start,len +start,len @@
1515 # @@ -start,len +start,len @@
1520 self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
1516 self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
1521 self.starta,
1517 self.starta,
1522 self.lena,
1518 self.lena,
1523 self.startb,
1519 self.startb,
1524 self.lenb,
1520 self.lenb,
1525 )
1521 )
1526 self.hunk[0] = self.desc
1522 self.hunk[0] = self.desc
1527 self._fixnewline(lr)
1523 self._fixnewline(lr)
1528
1524
1529 def _fixnewline(self, lr):
1525 def _fixnewline(self, lr):
1530 l = lr.readline()
1526 l = lr.readline()
1531 if l.startswith(br'\ '):
1527 if l.startswith(br'\ '):
1532 diffhelper.fixnewline(self.hunk, self.a, self.b)
1528 diffhelper.fixnewline(self.hunk, self.a, self.b)
1533 else:
1529 else:
1534 lr.push(l)
1530 lr.push(l)
1535
1531
1536 def complete(self):
1532 def complete(self):
1537 return len(self.a) == self.lena and len(self.b) == self.lenb
1533 return len(self.a) == self.lena and len(self.b) == self.lenb
1538
1534
1539 def _fuzzit(self, old, new, fuzz, toponly):
1535 def _fuzzit(self, old, new, fuzz, toponly):
1540 # this removes context lines from the top and bottom of list 'l'. It
1536 # this removes context lines from the top and bottom of list 'l'. It
1541 # checks the hunk to make sure only context lines are removed, and then
1537 # checks the hunk to make sure only context lines are removed, and then
1542 # returns a new shortened list of lines.
1538 # returns a new shortened list of lines.
1543 fuzz = min(fuzz, len(old))
1539 fuzz = min(fuzz, len(old))
1544 if fuzz:
1540 if fuzz:
1545 top = 0
1541 top = 0
1546 bot = 0
1542 bot = 0
1547 hlen = len(self.hunk)
1543 hlen = len(self.hunk)
1548 for x in pycompat.xrange(hlen - 1):
1544 for x in pycompat.xrange(hlen - 1):
1549 # the hunk starts with the @@ line, so use x+1
1545 # the hunk starts with the @@ line, so use x+1
1550 if self.hunk[x + 1].startswith(b' '):
1546 if self.hunk[x + 1].startswith(b' '):
1551 top += 1
1547 top += 1
1552 else:
1548 else:
1553 break
1549 break
1554 if not toponly:
1550 if not toponly:
1555 for x in pycompat.xrange(hlen - 1):
1551 for x in pycompat.xrange(hlen - 1):
1556 if self.hunk[hlen - bot - 1].startswith(b' '):
1552 if self.hunk[hlen - bot - 1].startswith(b' '):
1557 bot += 1
1553 bot += 1
1558 else:
1554 else:
1559 break
1555 break
1560
1556
1561 bot = min(fuzz, bot)
1557 bot = min(fuzz, bot)
1562 top = min(fuzz, top)
1558 top = min(fuzz, top)
1563 return old[top : len(old) - bot], new[top : len(new) - bot], top
1559 return old[top : len(old) - bot], new[top : len(new) - bot], top
1564 return old, new, 0
1560 return old, new, 0
1565
1561
1566 def fuzzit(self, fuzz, toponly):
1562 def fuzzit(self, fuzz, toponly):
1567 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1563 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1568 oldstart = self.starta + top
1564 oldstart = self.starta + top
1569 newstart = self.startb + top
1565 newstart = self.startb + top
1570 # zero length hunk ranges already have their start decremented
1566 # zero length hunk ranges already have their start decremented
1571 if self.lena and oldstart > 0:
1567 if self.lena and oldstart > 0:
1572 oldstart -= 1
1568 oldstart -= 1
1573 if self.lenb and newstart > 0:
1569 if self.lenb and newstart > 0:
1574 newstart -= 1
1570 newstart -= 1
1575 return old, oldstart, new, newstart
1571 return old, oldstart, new, newstart
1576
1572
1577
1573
1578 class binhunk(object):
1574 class binhunk(object):
1579 b'A binary patch file.'
1575 b'A binary patch file.'
1580
1576
1581 def __init__(self, lr, fname):
1577 def __init__(self, lr, fname):
1582 self.text = None
1578 self.text = None
1583 self.delta = False
1579 self.delta = False
1584 self.hunk = [b'GIT binary patch\n']
1580 self.hunk = [b'GIT binary patch\n']
1585 self._fname = fname
1581 self._fname = fname
1586 self._read(lr)
1582 self._read(lr)
1587
1583
1588 def complete(self):
1584 def complete(self):
1589 return self.text is not None
1585 return self.text is not None
1590
1586
1591 def new(self, lines):
1587 def new(self, lines):
1592 if self.delta:
1588 if self.delta:
1593 return [applybindelta(self.text, b''.join(lines))]
1589 return [applybindelta(self.text, b''.join(lines))]
1594 return [self.text]
1590 return [self.text]
1595
1591
1596 def _read(self, lr):
1592 def _read(self, lr):
1597 def getline(lr, hunk):
1593 def getline(lr, hunk):
1598 l = lr.readline()
1594 l = lr.readline()
1599 hunk.append(l)
1595 hunk.append(l)
1600 return l.rstrip(b'\r\n')
1596 return l.rstrip(b'\r\n')
1601
1597
1602 while True:
1598 while True:
1603 line = getline(lr, self.hunk)
1599 line = getline(lr, self.hunk)
1604 if not line:
1600 if not line:
1605 raise PatchError(
1601 raise PatchError(
1606 _(b'could not extract "%s" binary data') % self._fname
1602 _(b'could not extract "%s" binary data') % self._fname
1607 )
1603 )
1608 if line.startswith(b'literal '):
1604 if line.startswith(b'literal '):
1609 size = int(line[8:].rstrip())
1605 size = int(line[8:].rstrip())
1610 break
1606 break
1611 if line.startswith(b'delta '):
1607 if line.startswith(b'delta '):
1612 size = int(line[6:].rstrip())
1608 size = int(line[6:].rstrip())
1613 self.delta = True
1609 self.delta = True
1614 break
1610 break
1615 dec = []
1611 dec = []
1616 line = getline(lr, self.hunk)
1612 line = getline(lr, self.hunk)
1617 while len(line) > 1:
1613 while len(line) > 1:
1618 l = line[0:1]
1614 l = line[0:1]
1619 if l <= b'Z' and l >= b'A':
1615 if l <= b'Z' and l >= b'A':
1620 l = ord(l) - ord(b'A') + 1
1616 l = ord(l) - ord(b'A') + 1
1621 else:
1617 else:
1622 l = ord(l) - ord(b'a') + 27
1618 l = ord(l) - ord(b'a') + 27
1623 try:
1619 try:
1624 dec.append(util.b85decode(line[1:])[:l])
1620 dec.append(util.b85decode(line[1:])[:l])
1625 except ValueError as e:
1621 except ValueError as e:
1626 raise PatchError(
1622 raise PatchError(
1627 _(b'could not decode "%s" binary patch: %s')
1623 _(b'could not decode "%s" binary patch: %s')
1628 % (self._fname, stringutil.forcebytestr(e))
1624 % (self._fname, stringutil.forcebytestr(e))
1629 )
1625 )
1630 line = getline(lr, self.hunk)
1626 line = getline(lr, self.hunk)
1631 text = zlib.decompress(b''.join(dec))
1627 text = zlib.decompress(b''.join(dec))
1632 if len(text) != size:
1628 if len(text) != size:
1633 raise PatchError(
1629 raise PatchError(
1634 _(b'"%s" length is %d bytes, should be %d')
1630 _(b'"%s" length is %d bytes, should be %d')
1635 % (self._fname, len(text), size)
1631 % (self._fname, len(text), size)
1636 )
1632 )
1637 self.text = text
1633 self.text = text
1638
1634
1639
1635
1640 def parsefilename(str):
1636 def parsefilename(str):
1641 # --- filename \t|space stuff
1637 # --- filename \t|space stuff
1642 s = str[4:].rstrip(b'\r\n')
1638 s = str[4:].rstrip(b'\r\n')
1643 i = s.find(b'\t')
1639 i = s.find(b'\t')
1644 if i < 0:
1640 if i < 0:
1645 i = s.find(b' ')
1641 i = s.find(b' ')
1646 if i < 0:
1642 if i < 0:
1647 return s
1643 return s
1648 return s[:i]
1644 return s[:i]
1649
1645
1650
1646
1651 def reversehunks(hunks):
1647 def reversehunks(hunks):
1652 '''reverse the signs in the hunks given as argument
1648 '''reverse the signs in the hunks given as argument
1653
1649
1654 This function operates on hunks coming out of patch.filterpatch, that is
1650 This function operates on hunks coming out of patch.filterpatch, that is
1655 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1651 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1656
1652
1657 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1653 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1658 ... --- a/folder1/g
1654 ... --- a/folder1/g
1659 ... +++ b/folder1/g
1655 ... +++ b/folder1/g
1660 ... @@ -1,7 +1,7 @@
1656 ... @@ -1,7 +1,7 @@
1661 ... +firstline
1657 ... +firstline
1662 ... c
1658 ... c
1663 ... 1
1659 ... 1
1664 ... 2
1660 ... 2
1665 ... + 3
1661 ... + 3
1666 ... -4
1662 ... -4
1667 ... 5
1663 ... 5
1668 ... d
1664 ... d
1669 ... +lastline"""
1665 ... +lastline"""
1670 >>> hunks = parsepatch([rawpatch])
1666 >>> hunks = parsepatch([rawpatch])
1671 >>> hunkscomingfromfilterpatch = []
1667 >>> hunkscomingfromfilterpatch = []
1672 >>> for h in hunks:
1668 >>> for h in hunks:
1673 ... hunkscomingfromfilterpatch.append(h)
1669 ... hunkscomingfromfilterpatch.append(h)
1674 ... hunkscomingfromfilterpatch.extend(h.hunks)
1670 ... hunkscomingfromfilterpatch.extend(h.hunks)
1675
1671
1676 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1672 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1677 >>> from . import util
1673 >>> from . import util
1678 >>> fp = util.stringio()
1674 >>> fp = util.stringio()
1679 >>> for c in reversedhunks:
1675 >>> for c in reversedhunks:
1680 ... c.write(fp)
1676 ... c.write(fp)
1681 >>> fp.seek(0) or None
1677 >>> fp.seek(0) or None
1682 >>> reversedpatch = fp.read()
1678 >>> reversedpatch = fp.read()
1683 >>> print(pycompat.sysstr(reversedpatch))
1679 >>> print(pycompat.sysstr(reversedpatch))
1684 diff --git a/folder1/g b/folder1/g
1680 diff --git a/folder1/g b/folder1/g
1685 --- a/folder1/g
1681 --- a/folder1/g
1686 +++ b/folder1/g
1682 +++ b/folder1/g
1687 @@ -1,4 +1,3 @@
1683 @@ -1,4 +1,3 @@
1688 -firstline
1684 -firstline
1689 c
1685 c
1690 1
1686 1
1691 2
1687 2
1692 @@ -2,6 +1,6 @@
1688 @@ -2,6 +1,6 @@
1693 c
1689 c
1694 1
1690 1
1695 2
1691 2
1696 - 3
1692 - 3
1697 +4
1693 +4
1698 5
1694 5
1699 d
1695 d
1700 @@ -6,3 +5,2 @@
1696 @@ -6,3 +5,2 @@
1701 5
1697 5
1702 d
1698 d
1703 -lastline
1699 -lastline
1704
1700
1705 '''
1701 '''
1706
1702
1707 newhunks = []
1703 newhunks = []
1708 for c in hunks:
1704 for c in hunks:
1709 if util.safehasattr(c, b'reversehunk'):
1705 if util.safehasattr(c, b'reversehunk'):
1710 c = c.reversehunk()
1706 c = c.reversehunk()
1711 newhunks.append(c)
1707 newhunks.append(c)
1712 return newhunks
1708 return newhunks
1713
1709
1714
1710
1715 def parsepatch(originalchunks, maxcontext=None):
1711 def parsepatch(originalchunks, maxcontext=None):
1716 """patch -> [] of headers -> [] of hunks
1712 """patch -> [] of headers -> [] of hunks
1717
1713
1718 If maxcontext is not None, trim context lines if necessary.
1714 If maxcontext is not None, trim context lines if necessary.
1719
1715
1720 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1716 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1721 ... --- a/folder1/g
1717 ... --- a/folder1/g
1722 ... +++ b/folder1/g
1718 ... +++ b/folder1/g
1723 ... @@ -1,8 +1,10 @@
1719 ... @@ -1,8 +1,10 @@
1724 ... 1
1720 ... 1
1725 ... 2
1721 ... 2
1726 ... -3
1722 ... -3
1727 ... 4
1723 ... 4
1728 ... 5
1724 ... 5
1729 ... 6
1725 ... 6
1730 ... +6.1
1726 ... +6.1
1731 ... +6.2
1727 ... +6.2
1732 ... 7
1728 ... 7
1733 ... 8
1729 ... 8
1734 ... +9'''
1730 ... +9'''
1735 >>> out = util.stringio()
1731 >>> out = util.stringio()
1736 >>> headers = parsepatch([rawpatch], maxcontext=1)
1732 >>> headers = parsepatch([rawpatch], maxcontext=1)
1737 >>> for header in headers:
1733 >>> for header in headers:
1738 ... header.write(out)
1734 ... header.write(out)
1739 ... for hunk in header.hunks:
1735 ... for hunk in header.hunks:
1740 ... hunk.write(out)
1736 ... hunk.write(out)
1741 >>> print(pycompat.sysstr(out.getvalue()))
1737 >>> print(pycompat.sysstr(out.getvalue()))
1742 diff --git a/folder1/g b/folder1/g
1738 diff --git a/folder1/g b/folder1/g
1743 --- a/folder1/g
1739 --- a/folder1/g
1744 +++ b/folder1/g
1740 +++ b/folder1/g
1745 @@ -2,3 +2,2 @@
1741 @@ -2,3 +2,2 @@
1746 2
1742 2
1747 -3
1743 -3
1748 4
1744 4
1749 @@ -6,2 +5,4 @@
1745 @@ -6,2 +5,4 @@
1750 6
1746 6
1751 +6.1
1747 +6.1
1752 +6.2
1748 +6.2
1753 7
1749 7
1754 @@ -8,1 +9,2 @@
1750 @@ -8,1 +9,2 @@
1755 8
1751 8
1756 +9
1752 +9
1757 """
1753 """
1758
1754
1759 class parser(object):
1755 class parser(object):
1760 """patch parsing state machine"""
1756 """patch parsing state machine"""
1761
1757
1762 def __init__(self):
1758 def __init__(self):
1763 self.fromline = 0
1759 self.fromline = 0
1764 self.toline = 0
1760 self.toline = 0
1765 self.proc = b''
1761 self.proc = b''
1766 self.header = None
1762 self.header = None
1767 self.context = []
1763 self.context = []
1768 self.before = []
1764 self.before = []
1769 self.hunk = []
1765 self.hunk = []
1770 self.headers = []
1766 self.headers = []
1771
1767
1772 def addrange(self, limits):
1768 def addrange(self, limits):
1773 self.addcontext([])
1769 self.addcontext([])
1774 fromstart, fromend, tostart, toend, proc = limits
1770 fromstart, fromend, tostart, toend, proc = limits
1775 self.fromline = int(fromstart)
1771 self.fromline = int(fromstart)
1776 self.toline = int(tostart)
1772 self.toline = int(tostart)
1777 self.proc = proc
1773 self.proc = proc
1778
1774
1779 def addcontext(self, context):
1775 def addcontext(self, context):
1780 if self.hunk:
1776 if self.hunk:
1781 h = recordhunk(
1777 h = recordhunk(
1782 self.header,
1778 self.header,
1783 self.fromline,
1779 self.fromline,
1784 self.toline,
1780 self.toline,
1785 self.proc,
1781 self.proc,
1786 self.before,
1782 self.before,
1787 self.hunk,
1783 self.hunk,
1788 context,
1784 context,
1789 maxcontext,
1785 maxcontext,
1790 )
1786 )
1791 self.header.hunks.append(h)
1787 self.header.hunks.append(h)
1792 self.fromline += len(self.before) + h.removed
1788 self.fromline += len(self.before) + h.removed
1793 self.toline += len(self.before) + h.added
1789 self.toline += len(self.before) + h.added
1794 self.before = []
1790 self.before = []
1795 self.hunk = []
1791 self.hunk = []
1796 self.context = context
1792 self.context = context
1797
1793
1798 def addhunk(self, hunk):
1794 def addhunk(self, hunk):
1799 if self.context:
1795 if self.context:
1800 self.before = self.context
1796 self.before = self.context
1801 self.context = []
1797 self.context = []
1802 if self.hunk:
1798 if self.hunk:
1803 self.addcontext([])
1799 self.addcontext([])
1804 self.hunk = hunk
1800 self.hunk = hunk
1805
1801
1806 def newfile(self, hdr):
1802 def newfile(self, hdr):
1807 self.addcontext([])
1803 self.addcontext([])
1808 h = header(hdr)
1804 h = header(hdr)
1809 self.headers.append(h)
1805 self.headers.append(h)
1810 self.header = h
1806 self.header = h
1811
1807
1812 def addother(self, line):
1808 def addother(self, line):
1813 pass # 'other' lines are ignored
1809 pass # 'other' lines are ignored
1814
1810
1815 def finished(self):
1811 def finished(self):
1816 self.addcontext([])
1812 self.addcontext([])
1817 return self.headers
1813 return self.headers
1818
1814
1819 transitions = {
1815 transitions = {
1820 b'file': {
1816 b'file': {
1821 b'context': addcontext,
1817 b'context': addcontext,
1822 b'file': newfile,
1818 b'file': newfile,
1823 b'hunk': addhunk,
1819 b'hunk': addhunk,
1824 b'range': addrange,
1820 b'range': addrange,
1825 },
1821 },
1826 b'context': {
1822 b'context': {
1827 b'file': newfile,
1823 b'file': newfile,
1828 b'hunk': addhunk,
1824 b'hunk': addhunk,
1829 b'range': addrange,
1825 b'range': addrange,
1830 b'other': addother,
1826 b'other': addother,
1831 },
1827 },
1832 b'hunk': {
1828 b'hunk': {
1833 b'context': addcontext,
1829 b'context': addcontext,
1834 b'file': newfile,
1830 b'file': newfile,
1835 b'range': addrange,
1831 b'range': addrange,
1836 },
1832 },
1837 b'range': {b'context': addcontext, b'hunk': addhunk},
1833 b'range': {b'context': addcontext, b'hunk': addhunk},
1838 b'other': {b'other': addother},
1834 b'other': {b'other': addother},
1839 }
1835 }
1840
1836
1841 p = parser()
1837 p = parser()
1842 fp = stringio()
1838 fp = stringio()
1843 fp.write(b''.join(originalchunks))
1839 fp.write(b''.join(originalchunks))
1844 fp.seek(0)
1840 fp.seek(0)
1845
1841
1846 state = b'context'
1842 state = b'context'
1847 for newstate, data in scanpatch(fp):
1843 for newstate, data in scanpatch(fp):
1848 try:
1844 try:
1849 p.transitions[state][newstate](p, data)
1845 p.transitions[state][newstate](p, data)
1850 except KeyError:
1846 except KeyError:
1851 raise PatchError(
1847 raise PatchError(
1852 b'unhandled transition: %s -> %s' % (state, newstate)
1848 b'unhandled transition: %s -> %s' % (state, newstate)
1853 )
1849 )
1854 state = newstate
1850 state = newstate
1855 del fp
1851 del fp
1856 return p.finished()
1852 return p.finished()
1857
1853
1858
1854
1859 def pathtransform(path, strip, prefix):
1855 def pathtransform(path, strip, prefix):
1860 '''turn a path from a patch into a path suitable for the repository
1856 '''turn a path from a patch into a path suitable for the repository
1861
1857
1862 prefix, if not empty, is expected to be normalized with a / at the end.
1858 prefix, if not empty, is expected to be normalized with a / at the end.
1863
1859
1864 Returns (stripped components, path in repository).
1860 Returns (stripped components, path in repository).
1865
1861
1866 >>> pathtransform(b'a/b/c', 0, b'')
1862 >>> pathtransform(b'a/b/c', 0, b'')
1867 ('', 'a/b/c')
1863 ('', 'a/b/c')
1868 >>> pathtransform(b' a/b/c ', 0, b'')
1864 >>> pathtransform(b' a/b/c ', 0, b'')
1869 ('', ' a/b/c')
1865 ('', ' a/b/c')
1870 >>> pathtransform(b' a/b/c ', 2, b'')
1866 >>> pathtransform(b' a/b/c ', 2, b'')
1871 ('a/b/', 'c')
1867 ('a/b/', 'c')
1872 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1868 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1873 ('', 'd/e/a/b/c')
1869 ('', 'd/e/a/b/c')
1874 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1870 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1875 ('a//b/', 'd/e/c')
1871 ('a//b/', 'd/e/c')
1876 >>> pathtransform(b'a/b/c', 3, b'')
1872 >>> pathtransform(b'a/b/c', 3, b'')
1877 Traceback (most recent call last):
1873 Traceback (most recent call last):
1878 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1874 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1879 '''
1875 '''
1880 pathlen = len(path)
1876 pathlen = len(path)
1881 i = 0
1877 i = 0
1882 if strip == 0:
1878 if strip == 0:
1883 return b'', prefix + path.rstrip()
1879 return b'', prefix + path.rstrip()
1884 count = strip
1880 count = strip
1885 while count > 0:
1881 while count > 0:
1886 i = path.find(b'/', i)
1882 i = path.find(b'/', i)
1887 if i == -1:
1883 if i == -1:
1888 raise PatchError(
1884 raise PatchError(
1889 _(b"unable to strip away %d of %d dirs from %s")
1885 _(b"unable to strip away %d of %d dirs from %s")
1890 % (count, strip, path)
1886 % (count, strip, path)
1891 )
1887 )
1892 i += 1
1888 i += 1
1893 # consume '//' in the path
1889 # consume '//' in the path
1894 while i < pathlen - 1 and path[i : i + 1] == b'/':
1890 while i < pathlen - 1 and path[i : i + 1] == b'/':
1895 i += 1
1891 i += 1
1896 count -= 1
1892 count -= 1
1897 return path[:i].lstrip(), prefix + path[i:].rstrip()
1893 return path[:i].lstrip(), prefix + path[i:].rstrip()
1898
1894
1899
1895
1900 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1896 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1901 nulla = afile_orig == b"/dev/null"
1897 nulla = afile_orig == b"/dev/null"
1902 nullb = bfile_orig == b"/dev/null"
1898 nullb = bfile_orig == b"/dev/null"
1903 create = nulla and hunk.starta == 0 and hunk.lena == 0
1899 create = nulla and hunk.starta == 0 and hunk.lena == 0
1904 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1900 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1905 abase, afile = pathtransform(afile_orig, strip, prefix)
1901 abase, afile = pathtransform(afile_orig, strip, prefix)
1906 gooda = not nulla and backend.exists(afile)
1902 gooda = not nulla and backend.exists(afile)
1907 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1903 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1908 if afile == bfile:
1904 if afile == bfile:
1909 goodb = gooda
1905 goodb = gooda
1910 else:
1906 else:
1911 goodb = not nullb and backend.exists(bfile)
1907 goodb = not nullb and backend.exists(bfile)
1912 missing = not goodb and not gooda and not create
1908 missing = not goodb and not gooda and not create
1913
1909
1914 # some diff programs apparently produce patches where the afile is
1910 # some diff programs apparently produce patches where the afile is
1915 # not /dev/null, but afile starts with bfile
1911 # not /dev/null, but afile starts with bfile
1916 abasedir = afile[: afile.rfind(b'/') + 1]
1912 abasedir = afile[: afile.rfind(b'/') + 1]
1917 bbasedir = bfile[: bfile.rfind(b'/') + 1]
1913 bbasedir = bfile[: bfile.rfind(b'/') + 1]
1918 if (
1914 if (
1919 missing
1915 missing
1920 and abasedir == bbasedir
1916 and abasedir == bbasedir
1921 and afile.startswith(bfile)
1917 and afile.startswith(bfile)
1922 and hunk.starta == 0
1918 and hunk.starta == 0
1923 and hunk.lena == 0
1919 and hunk.lena == 0
1924 ):
1920 ):
1925 create = True
1921 create = True
1926 missing = False
1922 missing = False
1927
1923
1928 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1924 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1929 # diff is between a file and its backup. In this case, the original
1925 # diff is between a file and its backup. In this case, the original
1930 # file should be patched (see original mpatch code).
1926 # file should be patched (see original mpatch code).
1931 isbackup = abase == bbase and bfile.startswith(afile)
1927 isbackup = abase == bbase and bfile.startswith(afile)
1932 fname = None
1928 fname = None
1933 if not missing:
1929 if not missing:
1934 if gooda and goodb:
1930 if gooda and goodb:
1935 if isbackup:
1931 if isbackup:
1936 fname = afile
1932 fname = afile
1937 else:
1933 else:
1938 fname = bfile
1934 fname = bfile
1939 elif gooda:
1935 elif gooda:
1940 fname = afile
1936 fname = afile
1941
1937
1942 if not fname:
1938 if not fname:
1943 if not nullb:
1939 if not nullb:
1944 if isbackup:
1940 if isbackup:
1945 fname = afile
1941 fname = afile
1946 else:
1942 else:
1947 fname = bfile
1943 fname = bfile
1948 elif not nulla:
1944 elif not nulla:
1949 fname = afile
1945 fname = afile
1950 else:
1946 else:
1951 raise PatchError(_(b"undefined source and destination files"))
1947 raise PatchError(_(b"undefined source and destination files"))
1952
1948
1953 gp = patchmeta(fname)
1949 gp = patchmeta(fname)
1954 if create:
1950 if create:
1955 gp.op = b'ADD'
1951 gp.op = b'ADD'
1956 elif remove:
1952 elif remove:
1957 gp.op = b'DELETE'
1953 gp.op = b'DELETE'
1958 return gp
1954 return gp
1959
1955
1960
1956
1961 def scanpatch(fp):
1957 def scanpatch(fp):
1962 """like patch.iterhunks, but yield different events
1958 """like patch.iterhunks, but yield different events
1963
1959
1964 - ('file', [header_lines + fromfile + tofile])
1960 - ('file', [header_lines + fromfile + tofile])
1965 - ('context', [context_lines])
1961 - ('context', [context_lines])
1966 - ('hunk', [hunk_lines])
1962 - ('hunk', [hunk_lines])
1967 - ('range', (-start,len, +start,len, proc))
1963 - ('range', (-start,len, +start,len, proc))
1968 """
1964 """
1969 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1965 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1970 lr = linereader(fp)
1966 lr = linereader(fp)
1971
1967
1972 def scanwhile(first, p):
1968 def scanwhile(first, p):
1973 """scan lr while predicate holds"""
1969 """scan lr while predicate holds"""
1974 lines = [first]
1970 lines = [first]
1975 for line in iter(lr.readline, b''):
1971 for line in iter(lr.readline, b''):
1976 if p(line):
1972 if p(line):
1977 lines.append(line)
1973 lines.append(line)
1978 else:
1974 else:
1979 lr.push(line)
1975 lr.push(line)
1980 break
1976 break
1981 return lines
1977 return lines
1982
1978
1983 for line in iter(lr.readline, b''):
1979 for line in iter(lr.readline, b''):
1984 if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
1980 if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
1985
1981
1986 def notheader(line):
1982 def notheader(line):
1987 s = line.split(None, 1)
1983 s = line.split(None, 1)
1988 return not s or s[0] not in (b'---', b'diff')
1984 return not s or s[0] not in (b'---', b'diff')
1989
1985
1990 header = scanwhile(line, notheader)
1986 header = scanwhile(line, notheader)
1991 fromfile = lr.readline()
1987 fromfile = lr.readline()
1992 if fromfile.startswith(b'---'):
1988 if fromfile.startswith(b'---'):
1993 tofile = lr.readline()
1989 tofile = lr.readline()
1994 header += [fromfile, tofile]
1990 header += [fromfile, tofile]
1995 else:
1991 else:
1996 lr.push(fromfile)
1992 lr.push(fromfile)
1997 yield b'file', header
1993 yield b'file', header
1998 elif line.startswith(b' '):
1994 elif line.startswith(b' '):
1999 cs = (b' ', b'\\')
1995 cs = (b' ', b'\\')
2000 yield b'context', scanwhile(line, lambda l: l.startswith(cs))
1996 yield b'context', scanwhile(line, lambda l: l.startswith(cs))
2001 elif line.startswith((b'-', b'+')):
1997 elif line.startswith((b'-', b'+')):
2002 cs = (b'-', b'+', b'\\')
1998 cs = (b'-', b'+', b'\\')
2003 yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
1999 yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
2004 else:
2000 else:
2005 m = lines_re.match(line)
2001 m = lines_re.match(line)
2006 if m:
2002 if m:
2007 yield b'range', m.groups()
2003 yield b'range', m.groups()
2008 else:
2004 else:
2009 yield b'other', line
2005 yield b'other', line
2010
2006
2011
2007
2012 def scangitpatch(lr, firstline):
2008 def scangitpatch(lr, firstline):
2013 """
2009 """
2014 Git patches can emit:
2010 Git patches can emit:
2015 - rename a to b
2011 - rename a to b
2016 - change b
2012 - change b
2017 - copy a to c
2013 - copy a to c
2018 - change c
2014 - change c
2019
2015
2020 We cannot apply this sequence as-is, the renamed 'a' could not be
2016 We cannot apply this sequence as-is, the renamed 'a' could not be
2021 found for it would have been renamed already. And we cannot copy
2017 found for it would have been renamed already. And we cannot copy
2022 from 'b' instead because 'b' would have been changed already. So
2018 from 'b' instead because 'b' would have been changed already. So
2023 we scan the git patch for copy and rename commands so we can
2019 we scan the git patch for copy and rename commands so we can
2024 perform the copies ahead of time.
2020 perform the copies ahead of time.
2025 """
2021 """
2026 pos = 0
2022 pos = 0
2027 try:
2023 try:
2028 pos = lr.fp.tell()
2024 pos = lr.fp.tell()
2029 fp = lr.fp
2025 fp = lr.fp
2030 except IOError:
2026 except IOError:
2031 fp = stringio(lr.fp.read())
2027 fp = stringio(lr.fp.read())
2032 gitlr = linereader(fp)
2028 gitlr = linereader(fp)
2033 gitlr.push(firstline)
2029 gitlr.push(firstline)
2034 gitpatches = readgitpatch(gitlr)
2030 gitpatches = readgitpatch(gitlr)
2035 fp.seek(pos)
2031 fp.seek(pos)
2036 return gitpatches
2032 return gitpatches
2037
2033
2038
2034
2039 def iterhunks(fp):
2035 def iterhunks(fp):
2040 """Read a patch and yield the following events:
2036 """Read a patch and yield the following events:
2041 - ("file", afile, bfile, firsthunk): select a new target file.
2037 - ("file", afile, bfile, firsthunk): select a new target file.
2042 - ("hunk", hunk): a new hunk is ready to be applied, follows a
2038 - ("hunk", hunk): a new hunk is ready to be applied, follows a
2043 "file" event.
2039 "file" event.
2044 - ("git", gitchanges): current diff is in git format, gitchanges
2040 - ("git", gitchanges): current diff is in git format, gitchanges
2045 maps filenames to gitpatch records. Unique event.
2041 maps filenames to gitpatch records. Unique event.
2046 """
2042 """
2047 afile = b""
2043 afile = b""
2048 bfile = b""
2044 bfile = b""
2049 state = None
2045 state = None
2050 hunknum = 0
2046 hunknum = 0
2051 emitfile = newfile = False
2047 emitfile = newfile = False
2052 gitpatches = None
2048 gitpatches = None
2053
2049
2054 # our states
2050 # our states
2055 BFILE = 1
2051 BFILE = 1
2056 context = None
2052 context = None
2057 lr = linereader(fp)
2053 lr = linereader(fp)
2058
2054
2059 for x in iter(lr.readline, b''):
2055 for x in iter(lr.readline, b''):
2060 if state == BFILE and (
2056 if state == BFILE and (
2061 (not context and x.startswith(b'@'))
2057 (not context and x.startswith(b'@'))
2062 or (context is not False and x.startswith(b'***************'))
2058 or (context is not False and x.startswith(b'***************'))
2063 or x.startswith(b'GIT binary patch')
2059 or x.startswith(b'GIT binary patch')
2064 ):
2060 ):
2065 gp = None
2061 gp = None
2066 if gitpatches and gitpatches[-1].ispatching(afile, bfile):
2062 if gitpatches and gitpatches[-1].ispatching(afile, bfile):
2067 gp = gitpatches.pop()
2063 gp = gitpatches.pop()
2068 if x.startswith(b'GIT binary patch'):
2064 if x.startswith(b'GIT binary patch'):
2069 h = binhunk(lr, gp.path)
2065 h = binhunk(lr, gp.path)
2070 else:
2066 else:
2071 if context is None and x.startswith(b'***************'):
2067 if context is None and x.startswith(b'***************'):
2072 context = True
2068 context = True
2073 h = hunk(x, hunknum + 1, lr, context)
2069 h = hunk(x, hunknum + 1, lr, context)
2074 hunknum += 1
2070 hunknum += 1
2075 if emitfile:
2071 if emitfile:
2076 emitfile = False
2072 emitfile = False
2077 yield b'file', (afile, bfile, h, gp and gp.copy() or None)
2073 yield b'file', (afile, bfile, h, gp and gp.copy() or None)
2078 yield b'hunk', h
2074 yield b'hunk', h
2079 elif x.startswith(b'diff --git a/'):
2075 elif x.startswith(b'diff --git a/'):
2080 m = gitre.match(x.rstrip(b' \r\n'))
2076 m = gitre.match(x.rstrip(b' \r\n'))
2081 if not m:
2077 if not m:
2082 continue
2078 continue
2083 if gitpatches is None:
2079 if gitpatches is None:
2084 # scan whole input for git metadata
2080 # scan whole input for git metadata
2085 gitpatches = scangitpatch(lr, x)
2081 gitpatches = scangitpatch(lr, x)
2086 yield b'git', [
2082 yield b'git', [
2087 g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
2083 g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
2088 ]
2084 ]
2089 gitpatches.reverse()
2085 gitpatches.reverse()
2090 afile = b'a/' + m.group(1)
2086 afile = b'a/' + m.group(1)
2091 bfile = b'b/' + m.group(2)
2087 bfile = b'b/' + m.group(2)
2092 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
2088 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
2093 gp = gitpatches.pop()
2089 gp = gitpatches.pop()
2094 yield b'file', (
2090 yield b'file', (
2095 b'a/' + gp.path,
2091 b'a/' + gp.path,
2096 b'b/' + gp.path,
2092 b'b/' + gp.path,
2097 None,
2093 None,
2098 gp.copy(),
2094 gp.copy(),
2099 )
2095 )
2100 if not gitpatches:
2096 if not gitpatches:
2101 raise PatchError(
2097 raise PatchError(
2102 _(b'failed to synchronize metadata for "%s"') % afile[2:]
2098 _(b'failed to synchronize metadata for "%s"') % afile[2:]
2103 )
2099 )
2104 newfile = True
2100 newfile = True
2105 elif x.startswith(b'---'):
2101 elif x.startswith(b'---'):
2106 # check for a unified diff
2102 # check for a unified diff
2107 l2 = lr.readline()
2103 l2 = lr.readline()
2108 if not l2.startswith(b'+++'):
2104 if not l2.startswith(b'+++'):
2109 lr.push(l2)
2105 lr.push(l2)
2110 continue
2106 continue
2111 newfile = True
2107 newfile = True
2112 context = False
2108 context = False
2113 afile = parsefilename(x)
2109 afile = parsefilename(x)
2114 bfile = parsefilename(l2)
2110 bfile = parsefilename(l2)
2115 elif x.startswith(b'***'):
2111 elif x.startswith(b'***'):
2116 # check for a context diff
2112 # check for a context diff
2117 l2 = lr.readline()
2113 l2 = lr.readline()
2118 if not l2.startswith(b'---'):
2114 if not l2.startswith(b'---'):
2119 lr.push(l2)
2115 lr.push(l2)
2120 continue
2116 continue
2121 l3 = lr.readline()
2117 l3 = lr.readline()
2122 lr.push(l3)
2118 lr.push(l3)
2123 if not l3.startswith(b"***************"):
2119 if not l3.startswith(b"***************"):
2124 lr.push(l2)
2120 lr.push(l2)
2125 continue
2121 continue
2126 newfile = True
2122 newfile = True
2127 context = True
2123 context = True
2128 afile = parsefilename(x)
2124 afile = parsefilename(x)
2129 bfile = parsefilename(l2)
2125 bfile = parsefilename(l2)
2130
2126
2131 if newfile:
2127 if newfile:
2132 newfile = False
2128 newfile = False
2133 emitfile = True
2129 emitfile = True
2134 state = BFILE
2130 state = BFILE
2135 hunknum = 0
2131 hunknum = 0
2136
2132
2137 while gitpatches:
2133 while gitpatches:
2138 gp = gitpatches.pop()
2134 gp = gitpatches.pop()
2139 yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2135 yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2140
2136
2141
2137
2142 def applybindelta(binchunk, data):
2138 def applybindelta(binchunk, data):
2143 """Apply a binary delta hunk
2139 """Apply a binary delta hunk
2144 The algorithm used is the algorithm from git's patch-delta.c
2140 The algorithm used is the algorithm from git's patch-delta.c
2145 """
2141 """
2146
2142
2147 def deltahead(binchunk):
2143 def deltahead(binchunk):
2148 i = 0
2144 i = 0
2149 for c in pycompat.bytestr(binchunk):
2145 for c in pycompat.bytestr(binchunk):
2150 i += 1
2146 i += 1
2151 if not (ord(c) & 0x80):
2147 if not (ord(c) & 0x80):
2152 return i
2148 return i
2153 return i
2149 return i
2154
2150
2155 out = b""
2151 out = b""
2156 s = deltahead(binchunk)
2152 s = deltahead(binchunk)
2157 binchunk = binchunk[s:]
2153 binchunk = binchunk[s:]
2158 s = deltahead(binchunk)
2154 s = deltahead(binchunk)
2159 binchunk = binchunk[s:]
2155 binchunk = binchunk[s:]
2160 i = 0
2156 i = 0
2161 while i < len(binchunk):
2157 while i < len(binchunk):
2162 cmd = ord(binchunk[i : i + 1])
2158 cmd = ord(binchunk[i : i + 1])
2163 i += 1
2159 i += 1
2164 if cmd & 0x80:
2160 if cmd & 0x80:
2165 offset = 0
2161 offset = 0
2166 size = 0
2162 size = 0
2167 if cmd & 0x01:
2163 if cmd & 0x01:
2168 offset = ord(binchunk[i : i + 1])
2164 offset = ord(binchunk[i : i + 1])
2169 i += 1
2165 i += 1
2170 if cmd & 0x02:
2166 if cmd & 0x02:
2171 offset |= ord(binchunk[i : i + 1]) << 8
2167 offset |= ord(binchunk[i : i + 1]) << 8
2172 i += 1
2168 i += 1
2173 if cmd & 0x04:
2169 if cmd & 0x04:
2174 offset |= ord(binchunk[i : i + 1]) << 16
2170 offset |= ord(binchunk[i : i + 1]) << 16
2175 i += 1
2171 i += 1
2176 if cmd & 0x08:
2172 if cmd & 0x08:
2177 offset |= ord(binchunk[i : i + 1]) << 24
2173 offset |= ord(binchunk[i : i + 1]) << 24
2178 i += 1
2174 i += 1
2179 if cmd & 0x10:
2175 if cmd & 0x10:
2180 size = ord(binchunk[i : i + 1])
2176 size = ord(binchunk[i : i + 1])
2181 i += 1
2177 i += 1
2182 if cmd & 0x20:
2178 if cmd & 0x20:
2183 size |= ord(binchunk[i : i + 1]) << 8
2179 size |= ord(binchunk[i : i + 1]) << 8
2184 i += 1
2180 i += 1
2185 if cmd & 0x40:
2181 if cmd & 0x40:
2186 size |= ord(binchunk[i : i + 1]) << 16
2182 size |= ord(binchunk[i : i + 1]) << 16
2187 i += 1
2183 i += 1
2188 if size == 0:
2184 if size == 0:
2189 size = 0x10000
2185 size = 0x10000
2190 offset_end = offset + size
2186 offset_end = offset + size
2191 out += data[offset:offset_end]
2187 out += data[offset:offset_end]
2192 elif cmd != 0:
2188 elif cmd != 0:
2193 offset_end = i + cmd
2189 offset_end = i + cmd
2194 out += binchunk[i:offset_end]
2190 out += binchunk[i:offset_end]
2195 i += cmd
2191 i += cmd
2196 else:
2192 else:
2197 raise PatchError(_(b'unexpected delta opcode 0'))
2193 raise PatchError(_(b'unexpected delta opcode 0'))
2198 return out
2194 return out
2199
2195
2200
2196
2201 def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
2197 def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
2202 """Reads a patch from fp and tries to apply it.
2198 """Reads a patch from fp and tries to apply it.
2203
2199
2204 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2200 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2205 there was any fuzz.
2201 there was any fuzz.
2206
2202
2207 If 'eolmode' is 'strict', the patch content and patched file are
2203 If 'eolmode' is 'strict', the patch content and patched file are
2208 read in binary mode. Otherwise, line endings are ignored when
2204 read in binary mode. Otherwise, line endings are ignored when
2209 patching then normalized according to 'eolmode'.
2205 patching then normalized according to 'eolmode'.
2210 """
2206 """
2211 return _applydiff(
2207 return _applydiff(
2212 ui,
2208 ui,
2213 fp,
2209 fp,
2214 patchfile,
2210 patchfile,
2215 backend,
2211 backend,
2216 store,
2212 store,
2217 strip=strip,
2213 strip=strip,
2218 prefix=prefix,
2214 prefix=prefix,
2219 eolmode=eolmode,
2215 eolmode=eolmode,
2220 )
2216 )
2221
2217
2222
2218
2223 def _canonprefix(repo, prefix):
2219 def _canonprefix(repo, prefix):
2224 if prefix:
2220 if prefix:
2225 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2221 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2226 if prefix != b'':
2222 if prefix != b'':
2227 prefix += b'/'
2223 prefix += b'/'
2228 return prefix
2224 return prefix
2229
2225
2230
2226
2231 def _applydiff(
2227 def _applydiff(
2232 ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
2228 ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
2233 ):
2229 ):
2234 prefix = _canonprefix(backend.repo, prefix)
2230 prefix = _canonprefix(backend.repo, prefix)
2235
2231
2236 def pstrip(p):
2232 def pstrip(p):
2237 return pathtransform(p, strip - 1, prefix)[1]
2233 return pathtransform(p, strip - 1, prefix)[1]
2238
2234
2239 rejects = 0
2235 rejects = 0
2240 err = 0
2236 err = 0
2241 current_file = None
2237 current_file = None
2242
2238
2243 for state, values in iterhunks(fp):
2239 for state, values in iterhunks(fp):
2244 if state == b'hunk':
2240 if state == b'hunk':
2245 if not current_file:
2241 if not current_file:
2246 continue
2242 continue
2247 ret = current_file.apply(values)
2243 ret = current_file.apply(values)
2248 if ret > 0:
2244 if ret > 0:
2249 err = 1
2245 err = 1
2250 elif state == b'file':
2246 elif state == b'file':
2251 if current_file:
2247 if current_file:
2252 rejects += current_file.close()
2248 rejects += current_file.close()
2253 current_file = None
2249 current_file = None
2254 afile, bfile, first_hunk, gp = values
2250 afile, bfile, first_hunk, gp = values
2255 if gp:
2251 if gp:
2256 gp.path = pstrip(gp.path)
2252 gp.path = pstrip(gp.path)
2257 if gp.oldpath:
2253 if gp.oldpath:
2258 gp.oldpath = pstrip(gp.oldpath)
2254 gp.oldpath = pstrip(gp.oldpath)
2259 else:
2255 else:
2260 gp = makepatchmeta(
2256 gp = makepatchmeta(
2261 backend, afile, bfile, first_hunk, strip, prefix
2257 backend, afile, bfile, first_hunk, strip, prefix
2262 )
2258 )
2263 if gp.op == b'RENAME':
2259 if gp.op == b'RENAME':
2264 backend.unlink(gp.oldpath)
2260 backend.unlink(gp.oldpath)
2265 if not first_hunk:
2261 if not first_hunk:
2266 if gp.op == b'DELETE':
2262 if gp.op == b'DELETE':
2267 backend.unlink(gp.path)
2263 backend.unlink(gp.path)
2268 continue
2264 continue
2269 data, mode = None, None
2265 data, mode = None, None
2270 if gp.op in (b'RENAME', b'COPY'):
2266 if gp.op in (b'RENAME', b'COPY'):
2271 data, mode = store.getfile(gp.oldpath)[:2]
2267 data, mode = store.getfile(gp.oldpath)[:2]
2272 if data is None:
2268 if data is None:
2273 # This means that the old path does not exist
2269 # This means that the old path does not exist
2274 raise PatchError(
2270 raise PatchError(
2275 _(b"source file '%s' does not exist") % gp.oldpath
2271 _(b"source file '%s' does not exist") % gp.oldpath
2276 )
2272 )
2277 if gp.mode:
2273 if gp.mode:
2278 mode = gp.mode
2274 mode = gp.mode
2279 if gp.op == b'ADD':
2275 if gp.op == b'ADD':
2280 # Added files without content have no hunk and
2276 # Added files without content have no hunk and
2281 # must be created
2277 # must be created
2282 data = b''
2278 data = b''
2283 if data or mode:
2279 if data or mode:
2284 if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
2280 if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
2285 gp.path
2281 gp.path
2286 ):
2282 ):
2287 raise PatchError(
2283 raise PatchError(
2288 _(
2284 _(
2289 b"cannot create %s: destination "
2285 b"cannot create %s: destination "
2290 b"already exists"
2286 b"already exists"
2291 )
2287 )
2292 % gp.path
2288 % gp.path
2293 )
2289 )
2294 backend.setfile(gp.path, data, mode, gp.oldpath)
2290 backend.setfile(gp.path, data, mode, gp.oldpath)
2295 continue
2291 continue
2296 try:
2292 try:
2297 current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
2293 current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
2298 except PatchError as inst:
2294 except PatchError as inst:
2299 ui.warn(str(inst) + b'\n')
2295 ui.warn(str(inst) + b'\n')
2300 current_file = None
2296 current_file = None
2301 rejects += 1
2297 rejects += 1
2302 continue
2298 continue
2303 elif state == b'git':
2299 elif state == b'git':
2304 for gp in values:
2300 for gp in values:
2305 path = pstrip(gp.oldpath)
2301 path = pstrip(gp.oldpath)
2306 data, mode = backend.getfile(path)
2302 data, mode = backend.getfile(path)
2307 if data is None:
2303 if data is None:
2308 # The error ignored here will trigger a getfile()
2304 # The error ignored here will trigger a getfile()
2309 # error in a place more appropriate for error
2305 # error in a place more appropriate for error
2310 # handling, and will not interrupt the patching
2306 # handling, and will not interrupt the patching
2311 # process.
2307 # process.
2312 pass
2308 pass
2313 else:
2309 else:
2314 store.setfile(path, data, mode)
2310 store.setfile(path, data, mode)
2315 else:
2311 else:
2316 raise error.Abort(_(b'unsupported parser state: %s') % state)
2312 raise error.Abort(_(b'unsupported parser state: %s') % state)
2317
2313
2318 if current_file:
2314 if current_file:
2319 rejects += current_file.close()
2315 rejects += current_file.close()
2320
2316
2321 if rejects:
2317 if rejects:
2322 return -1
2318 return -1
2323 return err
2319 return err
2324
2320
2325
2321
2326 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
2322 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
2327 """use <patcher> to apply <patchname> to the working directory.
2323 """use <patcher> to apply <patchname> to the working directory.
2328 returns whether patch was applied with fuzz factor."""
2324 returns whether patch was applied with fuzz factor."""
2329
2325
2330 fuzz = False
2326 fuzz = False
2331 args = []
2327 args = []
2332 cwd = repo.root
2328 cwd = repo.root
2333 if cwd:
2329 if cwd:
2334 args.append(b'-d %s' % procutil.shellquote(cwd))
2330 args.append(b'-d %s' % procutil.shellquote(cwd))
2335 cmd = b'%s %s -p%d < %s' % (
2331 cmd = b'%s %s -p%d < %s' % (
2336 patcher,
2332 patcher,
2337 b' '.join(args),
2333 b' '.join(args),
2338 strip,
2334 strip,
2339 procutil.shellquote(patchname),
2335 procutil.shellquote(patchname),
2340 )
2336 )
2341 ui.debug(b'Using external patch tool: %s\n' % cmd)
2337 ui.debug(b'Using external patch tool: %s\n' % cmd)
2342 fp = procutil.popen(cmd, b'rb')
2338 fp = procutil.popen(cmd, b'rb')
2343 try:
2339 try:
2344 for line in util.iterfile(fp):
2340 for line in util.iterfile(fp):
2345 line = line.rstrip()
2341 line = line.rstrip()
2346 ui.note(line + b'\n')
2342 ui.note(line + b'\n')
2347 if line.startswith(b'patching file '):
2343 if line.startswith(b'patching file '):
2348 pf = util.parsepatchoutput(line)
2344 pf = util.parsepatchoutput(line)
2349 printed_file = False
2345 printed_file = False
2350 files.add(pf)
2346 files.add(pf)
2351 elif line.find(b'with fuzz') >= 0:
2347 elif line.find(b'with fuzz') >= 0:
2352 fuzz = True
2348 fuzz = True
2353 if not printed_file:
2349 if not printed_file:
2354 ui.warn(pf + b'\n')
2350 ui.warn(pf + b'\n')
2355 printed_file = True
2351 printed_file = True
2356 ui.warn(line + b'\n')
2352 ui.warn(line + b'\n')
2357 elif line.find(b'saving rejects to file') >= 0:
2353 elif line.find(b'saving rejects to file') >= 0:
2358 ui.warn(line + b'\n')
2354 ui.warn(line + b'\n')
2359 elif line.find(b'FAILED') >= 0:
2355 elif line.find(b'FAILED') >= 0:
2360 if not printed_file:
2356 if not printed_file:
2361 ui.warn(pf + b'\n')
2357 ui.warn(pf + b'\n')
2362 printed_file = True
2358 printed_file = True
2363 ui.warn(line + b'\n')
2359 ui.warn(line + b'\n')
2364 finally:
2360 finally:
2365 if files:
2361 if files:
2366 scmutil.marktouched(repo, files, similarity)
2362 scmutil.marktouched(repo, files, similarity)
2367 code = fp.close()
2363 code = fp.close()
2368 if code:
2364 if code:
2369 raise PatchError(
2365 raise PatchError(
2370 _(b"patch command failed: %s") % procutil.explainexit(code)
2366 _(b"patch command failed: %s") % procutil.explainexit(code)
2371 )
2367 )
2372 return fuzz
2368 return fuzz
2373
2369
2374
2370
def patchbackend(
    ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply a patch through the given backend.

    ``patchobj`` may be a path (opened here in binary mode) or an
    already-open file-like object.  Names of files touched by the patch
    are accumulated into ``files``.  Returns True when the patch applied
    with fuzz, False otherwise; raises PatchError when it failed to
    apply and error.Abort for an unknown ``eolmode``.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config(b'patch', b'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, b'rb')
    except TypeError:
        # not a path: assume an already-open file-like object
        fp = patchobj
    try:
        ret = applydiff(
            ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
        )
    finally:
        # only close what we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_(b'patch failed to apply'))
    return ret > 0
2403
2399
2404
2400
def internalpatch(
    ui,
    repo,
    patchobj,
    strip,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2419
2415
2420
2416
def patchrepo(
    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
):
    """Apply <patchobj> on top of ``ctx`` through a repository backend."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2426
2422
2427
2423
def patch(
    ui,
    repo,
    patchname,
    strip=1,
    prefix=b'',
    files=None,
    eolmode=b'strict',
    similarity=0,
):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured external patch tool takes precedence over
    # the builtin patcher
    patcher = ui.config(b'ui', b'patch')
    if patcher:
        return _externalpatch(
            ui, repo, patcher, patchname, strip, files, similarity
        )
    return internalpatch(
        ui, repo, patchname, strip, prefix, files, eolmode, similarity
    )
2459
2455
2460
2456
def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
    """Return the set of file names touched by the patch at ``patchpath``.

    Paths are transformed with ``strip``/``prefix`` the same way the
    patch application code would; for renames both the old and the new
    names are reported.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, b'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == b'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch metadata carries its own paths; strip and
                    # prefix them in place
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(
                            gp.oldpath, strip - 1, prefix
                        )[1]
                else:
                    gp = makepatchmeta(
                        backend, afile, bfile, first_hunk, strip, prefix
                    )
                changed.add(gp.path)
                if gp.op == b'RENAME':
                    changed.add(gp.oldpath)
            elif state not in (b'hunk', b'git'):
                raise error.Abort(_(b'unsupported parser state: %s') % state)
        return changed
2485
2481
2486
2482
class GitDiffRequired(Exception):
    """Raised when a change cannot be expressed without the git diff format."""
2489
2485
2490
2486
# Backwards-compatible aliases: the diff-options constructors now live in
# the diffutil module.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2494
2490
2495
2491
def diff(
    repo,
    node1=None,
    node2=None,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
    hunksfilterfn=None,
):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    pathfn is applied to every path in the diff output.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    if copysourcematch is not None, then copy sources will be filtered by this
    matcher

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    for fctx1, fctx2, hdr, hunks in diffhunks(
        repo,
        ctx1=ctx1,
        ctx2=ctx2,
        match=match,
        changes=changes,
        opts=opts,
        losedatafn=losedatafn,
        pathfn=pathfn,
        copy=copy,
        copysourcematch=copysourcematch,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert (
                fctx2 is not None
            ), b'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten all hunk lines of this file into a single chunk of text
        text = b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header if there is content, or if the header itself is
        # more than a bare 'diff' line (e.g. mode changes, renames)
        if hdr and (text or len(hdr) > 1):
            yield b'\n'.join(hdr) + b'\n'
        if text:
            yield text
2569
2565
2570
2566
def diffhunks(
    repo,
    ctx1,
    ctx2,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    pathfn=None,
    copy=None,
    copysourcematch=None,
):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by file name, so repeated
        # filectx lookups for the same file reuse the revlog object
        cache = {}
        order = collections.deque()

        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used position
                order.remove(f)
            order.append(f)
            return fctx

        return getfilectx

    getfilectx = lrugetfilectx()

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if copysourcematch:
        # filter out copies where source side isn't inside the matcher
        # (copies.pathcopies() already filtered out the destination)
        copy = {
            dst: src
            for dst, src in pycompat.iteritems(copy)
            if copysourcematch(src)
        }

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as
            # removed. They are not in ctx1, so we don't want to show them
            # in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as
            # additions.
            del copy[dst]

    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset)
    )
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(
            repo,
            revs,
            ctx1,
            ctx2,
            modified,
            added,
            removed,
            copy,
            getfilectx,
            opts,
            losedata,
            pathfn,
        )

    if opts.upgrade and not opts.git:
        try:

            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired

            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2696
2692
2697
2693
def diffsinglehunk(hunklines):
    """yield tokens for a list of lines in a single hunk"""
    for line in hunklines:
        # chomp the EOL, then detect trailing whitespace separately
        chompline = line.rstrip(b'\r\n')
        stripline = chompline.rstrip()
        if line.startswith(b'-'):
            label = b'diff.deleted'
        elif line.startswith(b'+'):
            label = b'diff.inserted'
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
        # highlight tabs distinctly from the rest of the line
        for token in tabsplitter.findall(stripline):
            if token.startswith(b'\t'):
                yield (token, b'diff.tab')
            else:
                yield (token, label)

        if chompline != stripline:
            yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
        if chompline != line:
            yield (line[len(chompline) :], b'')
2721
2717
2722
2718
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content
    a = b''
    b = b''
    for line in hunklines:
        if line[0:1] == b'-':
            a += line[1:]
        elif line[0:1] == b'+':
            b += line[1:]
        else:
            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == b'\n' else s + b'\n' for s in al]
    bln = [s if s == b'\n' else s + b'\n' for s in bl]
    an = b''.join(aln)
    bn = b''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == b'!'
        for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [
        (b'-', b'diff.deleted', atokens),
        (b'+', b'diff.inserted', btokens),
    ]:
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith(b'\n')
            if isendofline:
                chomp = token[:-1]  # chomp
                if chomp.endswith(b'\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp) :]
                token = chomp.rstrip()  # detect spaces at the end
                endspaces = chomp[len(token) :]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = b'diff.tab'
                else:
                    if changed:
                        currentlabel = label + b'.changed'
                    else:
                        currentlabel = label + b'.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, b'diff.trailingwhitespace')
                yield (endofline, b'')
                nextisnewline = True
2793
2789
2794
2790
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [
        (b'diff', b'diff.diffline'),
        (b'copy', b'diff.extended'),
        (b'rename', b'diff.extended'),
        (b'old', b'diff.extended'),
        (b'new', b'diff.extended'),
        (b'deleted', b'diff.extended'),
        (b'index', b'diff.extended'),
        (b'similarity', b'diff.extended'),
        (b'---', b'diff.file_a'),
        (b'+++', b'diff.file_b'),
    ]
    textprefixes = [
        (b'@', b'diff.hunk'),
        # - and + are handled by diffsinglehunk
    ]
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []

    def consumehunkbuffer():
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split(b'\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            if head:
                # a header section ends at the first hunk marker
                if line.startswith(b'@'):
                    head = False
            else:
                if line and not line.startswith(
                    (b' ', b'+', b'-', b'@', b'\\')
                ):
                    head = True
            diffline = False
            if not head and line and line.startswith((b'+', b'-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered: accumulate adjacent -/+ lines into one hunk
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += b"\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered: flush any pending hunk, then label this line
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (
                                line[len(stripline) :],
                                b'diff.trailingwhitespace',
                            )
                        break
                else:
                    yield (line, b'')
                if i + 1 < linecount:
                    yield (b'\n', b'')
    for token in consumehunkbuffer():
        yield token
2873
2869
2874
2870
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
2878
2874
2879
2875
2880 def _filepairs(modified, added, removed, copy, opts):
2876 def _filepairs(modified, added, removed, copy, opts):
2881 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2877 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2882 before and f2 is the the name after. For added files, f1 will be None,
2878 before and f2 is the the name after. For added files, f1 will be None,
2883 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2879 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2884 or 'rename' (the latter two only if opts.git is set).'''
2880 or 'rename' (the latter two only if opts.git is set).'''
2885 gone = set()
2881 gone = set()
2886
2882
2887 copyto = dict([(v, k) for k, v in copy.items()])
2883 copyto = dict([(v, k) for k, v in copy.items()])
2888
2884
2889 addedset, removedset = set(added), set(removed)
2885 addedset, removedset = set(added), set(removed)
2890
2886
2891 for f in sorted(modified + added + removed):
2887 for f in sorted(modified + added + removed):
2892 copyop = None
2888 copyop = None
2893 f1, f2 = f, f
2889 f1, f2 = f, f
2894 if f in addedset:
2890 if f in addedset:
2895 f1 = None
2891 f1 = None
2896 if f in copy:
2892 if f in copy:
2897 if opts.git:
2893 if opts.git:
2898 f1 = copy[f]
2894 f1 = copy[f]
2899 if f1 in removedset and f1 not in gone:
2895 if f1 in removedset and f1 not in gone:
2900 copyop = b'rename'
2896 copyop = b'rename'
2901 gone.add(f1)
2897 gone.add(f1)
2902 else:
2898 else:
2903 copyop = b'copy'
2899 copyop = b'copy'
2904 elif f in removedset:
2900 elif f in removedset:
2905 f2 = None
2901 f2 = None
2906 if opts.git:
2902 if opts.git:
2907 # have we already reported a copy above?
2903 # have we already reported a copy above?
2908 if (
2904 if (
2909 f in copyto
2905 f in copyto
2910 and copyto[f] in addedset
2906 and copyto[f] in addedset
2911 and copy[copyto[f]] == f
2907 and copy[copyto[f]] == f
2912 ):
2908 ):
2913 continue
2909 continue
2914 yield f1, f2, copyop
2910 yield f1, f2, copyop
2915
2911
2916
2912
2917 def trydiff(
2913 def trydiff(
2918 repo,
2914 repo,
2919 revs,
2915 revs,
2920 ctx1,
2916 ctx1,
2921 ctx2,
2917 ctx2,
2922 modified,
2918 modified,
2923 added,
2919 added,
2924 removed,
2920 removed,
2925 copy,
2921 copy,
2926 getfilectx,
2922 getfilectx,
2927 opts,
2923 opts,
2928 losedatafn,
2924 losedatafn,
2929 pathfn,
2925 pathfn,
2930 ):
2926 ):
2931 '''given input data, generate a diff and yield it in blocks
2927 '''given input data, generate a diff and yield it in blocks
2932
2928
2933 If generating a diff would lose data like flags or binary data and
2929 If generating a diff would lose data like flags or binary data and
2934 losedatafn is not None, it will be called.
2930 losedatafn is not None, it will be called.
2935
2931
2936 pathfn is applied to every path in the diff output.
2932 pathfn is applied to every path in the diff output.
2937 '''
2933 '''
2938
2934
2939 def gitindex(text):
2935 def gitindex(text):
2940 if not text:
2936 if not text:
2941 text = b""
2937 text = b""
2942 l = len(text)
2938 l = len(text)
2943 s = hashlib.sha1(b'blob %d\0' % l)
2939 s = hashlib.sha1(b'blob %d\0' % l)
2944 s.update(text)
2940 s.update(text)
2945 return hex(s.digest())
2941 return hex(s.digest())
2946
2942
2947 if opts.noprefix:
2943 if opts.noprefix:
2948 aprefix = bprefix = b''
2944 aprefix = bprefix = b''
2949 else:
2945 else:
2950 aprefix = b'a/'
2946 aprefix = b'a/'
2951 bprefix = b'b/'
2947 bprefix = b'b/'
2952
2948
2953 def diffline(f, revs):
2949 def diffline(f, revs):
2954 revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2950 revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2955 return b'diff %s %s' % (revinfo, f)
2951 return b'diff %s %s' % (revinfo, f)
2956
2952
2957 def isempty(fctx):
2953 def isempty(fctx):
2958 return fctx is None or fctx.size() == 0
2954 return fctx is None or fctx.size() == 0
2959
2955
2960 date1 = dateutil.datestr(ctx1.date())
2956 date1 = dateutil.datestr(ctx1.date())
2961 date2 = dateutil.datestr(ctx2.date())
2957 date2 = dateutil.datestr(ctx2.date())
2962
2958
2963 gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2959 gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2964
2960
2965 if not pathfn:
2961 if not pathfn:
2966 pathfn = lambda f: f
2962 pathfn = lambda f: f
2967
2963
2968 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2964 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2969 content1 = None
2965 content1 = None
2970 content2 = None
2966 content2 = None
2971 fctx1 = None
2967 fctx1 = None
2972 fctx2 = None
2968 fctx2 = None
2973 flag1 = None
2969 flag1 = None
2974 flag2 = None
2970 flag2 = None
2975 if f1:
2971 if f1:
2976 fctx1 = getfilectx(f1, ctx1)
2972 fctx1 = getfilectx(f1, ctx1)
2977 if opts.git or losedatafn:
2973 if opts.git or losedatafn:
2978 flag1 = ctx1.flags(f1)
2974 flag1 = ctx1.flags(f1)
2979 if f2:
2975 if f2:
2980 fctx2 = getfilectx(f2, ctx2)
2976 fctx2 = getfilectx(f2, ctx2)
2981 if opts.git or losedatafn:
2977 if opts.git or losedatafn:
2982 flag2 = ctx2.flags(f2)
2978 flag2 = ctx2.flags(f2)
2983 # if binary is True, output "summary" or "base85", but not "text diff"
2979 # if binary is True, output "summary" or "base85", but not "text diff"
2984 if opts.text:
2980 if opts.text:
2985 binary = False
2981 binary = False
2986 else:
2982 else:
2987 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2983 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2988
2984
2989 if losedatafn and not opts.git:
2985 if losedatafn and not opts.git:
2990 if (
2986 if (
2991 binary
2987 binary
2992 or
2988 or
2993 # copy/rename
2989 # copy/rename
2994 f2 in copy
2990 f2 in copy
2995 or
2991 or
2996 # empty file creation
2992 # empty file creation
2997 (not f1 and isempty(fctx2))
2993 (not f1 and isempty(fctx2))
2998 or
2994 or
2999 # empty file deletion
2995 # empty file deletion
3000 (isempty(fctx1) and not f2)
2996 (isempty(fctx1) and not f2)
3001 or
2997 or
3002 # create with flags
2998 # create with flags
3003 (not f1 and flag2)
2999 (not f1 and flag2)
3004 or
3000 or
3005 # change flags
3001 # change flags
3006 (f1 and f2 and flag1 != flag2)
3002 (f1 and f2 and flag1 != flag2)
3007 ):
3003 ):
3008 losedatafn(f2 or f1)
3004 losedatafn(f2 or f1)
3009
3005
3010 path1 = pathfn(f1 or f2)
3006 path1 = pathfn(f1 or f2)
3011 path2 = pathfn(f2 or f1)
3007 path2 = pathfn(f2 or f1)
3012 header = []
3008 header = []
3013 if opts.git:
3009 if opts.git:
3014 header.append(
3010 header.append(
3015 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3011 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3016 )
3012 )
3017 if not f1: # added
3013 if not f1: # added
3018 header.append(b'new file mode %s' % gitmode[flag2])
3014 header.append(b'new file mode %s' % gitmode[flag2])
3019 elif not f2: # removed
3015 elif not f2: # removed
3020 header.append(b'deleted file mode %s' % gitmode[flag1])
3016 header.append(b'deleted file mode %s' % gitmode[flag1])
3021 else: # modified/copied/renamed
3017 else: # modified/copied/renamed
3022 mode1, mode2 = gitmode[flag1], gitmode[flag2]
3018 mode1, mode2 = gitmode[flag1], gitmode[flag2]
3023 if mode1 != mode2:
3019 if mode1 != mode2:
3024 header.append(b'old mode %s' % mode1)
3020 header.append(b'old mode %s' % mode1)
3025 header.append(b'new mode %s' % mode2)
3021 header.append(b'new mode %s' % mode2)
3026 if copyop is not None:
3022 if copyop is not None:
3027 if opts.showsimilarity:
3023 if opts.showsimilarity:
3028 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3024 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3029 header.append(b'similarity index %d%%' % sim)
3025 header.append(b'similarity index %d%%' % sim)
3030 header.append(b'%s from %s' % (copyop, path1))
3026 header.append(b'%s from %s' % (copyop, path1))
3031 header.append(b'%s to %s' % (copyop, path2))
3027 header.append(b'%s to %s' % (copyop, path2))
3032 elif revs:
3028 elif revs:
3033 header.append(diffline(path1, revs))
3029 header.append(diffline(path1, revs))
3034
3030
3035 # fctx.is | diffopts | what to | is fctx.data()
3031 # fctx.is | diffopts | what to | is fctx.data()
3036 # binary() | text nobinary git index | output? | outputted?
3032 # binary() | text nobinary git index | output? | outputted?
3037 # ------------------------------------|----------------------------
3033 # ------------------------------------|----------------------------
3038 # yes | no no no * | summary | no
3034 # yes | no no no * | summary | no
3039 # yes | no no yes * | base85 | yes
3035 # yes | no no yes * | base85 | yes
3040 # yes | no yes no * | summary | no
3036 # yes | no yes no * | summary | no
3041 # yes | no yes yes 0 | summary | no
3037 # yes | no yes yes 0 | summary | no
3042 # yes | no yes yes >0 | summary | semi [1]
3038 # yes | no yes yes >0 | summary | semi [1]
3043 # yes | yes * * * | text diff | yes
3039 # yes | yes * * * | text diff | yes
3044 # no | * * * * | text diff | yes
3040 # no | * * * * | text diff | yes
3045 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3041 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3046 if binary and (
3042 if binary and (
3047 not opts.git or (opts.git and opts.nobinary and not opts.index)
3043 not opts.git or (opts.git and opts.nobinary and not opts.index)
3048 ):
3044 ):
3049 # fast path: no binary content will be displayed, content1 and
3045 # fast path: no binary content will be displayed, content1 and
3050 # content2 are only used for equivalent test. cmp() could have a
3046 # content2 are only used for equivalent test. cmp() could have a
3051 # fast path.
3047 # fast path.
3052 if fctx1 is not None:
3048 if fctx1 is not None:
3053 content1 = b'\0'
3049 content1 = b'\0'
3054 if fctx2 is not None:
3050 if fctx2 is not None:
3055 if fctx1 is not None and not fctx1.cmp(fctx2):
3051 if fctx1 is not None and not fctx1.cmp(fctx2):
3056 content2 = b'\0' # not different
3052 content2 = b'\0' # not different
3057 else:
3053 else:
3058 content2 = b'\0\0'
3054 content2 = b'\0\0'
3059 else:
3055 else:
3060 # normal path: load contents
3056 # normal path: load contents
3061 if fctx1 is not None:
3057 if fctx1 is not None:
3062 content1 = fctx1.data()
3058 content1 = fctx1.data()
3063 if fctx2 is not None:
3059 if fctx2 is not None:
3064 content2 = fctx2.data()
3060 content2 = fctx2.data()
3065
3061
3066 if binary and opts.git and not opts.nobinary:
3062 if binary and opts.git and not opts.nobinary:
3067 text = mdiff.b85diff(content1, content2)
3063 text = mdiff.b85diff(content1, content2)
3068 if text:
3064 if text:
3069 header.append(
3065 header.append(
3070 b'index %s..%s' % (gitindex(content1), gitindex(content2))
3066 b'index %s..%s' % (gitindex(content1), gitindex(content2))
3071 )
3067 )
3072 hunks = ((None, [text]),)
3068 hunks = ((None, [text]),)
3073 else:
3069 else:
3074 if opts.git and opts.index > 0:
3070 if opts.git and opts.index > 0:
3075 flag = flag1
3071 flag = flag1
3076 if flag is None:
3072 if flag is None:
3077 flag = flag2
3073 flag = flag2
3078 header.append(
3074 header.append(
3079 b'index %s..%s %s'
3075 b'index %s..%s %s'
3080 % (
3076 % (
3081 gitindex(content1)[0 : opts.index],
3077 gitindex(content1)[0 : opts.index],
3082 gitindex(content2)[0 : opts.index],
3078 gitindex(content2)[0 : opts.index],
3083 gitmode[flag],
3079 gitmode[flag],
3084 )
3080 )
3085 )
3081 )
3086
3082
3087 uheaders, hunks = mdiff.unidiff(
3083 uheaders, hunks = mdiff.unidiff(
3088 content1,
3084 content1,
3089 date1,
3085 date1,
3090 content2,
3086 content2,
3091 date2,
3087 date2,
3092 path1,
3088 path1,
3093 path2,
3089 path2,
3094 binary=binary,
3090 binary=binary,
3095 opts=opts,
3091 opts=opts,
3096 )
3092 )
3097 header.extend(uheaders)
3093 header.extend(uheaders)
3098 yield fctx1, fctx2, header, hunks
3094 yield fctx1, fctx2, header, hunks
3099
3095
3100
3096
3101 def diffstatsum(stats):
3097 def diffstatsum(stats):
3102 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
3098 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
3103 for f, a, r, b in stats:
3099 for f, a, r, b in stats:
3104 maxfile = max(maxfile, encoding.colwidth(f))
3100 maxfile = max(maxfile, encoding.colwidth(f))
3105 maxtotal = max(maxtotal, a + r)
3101 maxtotal = max(maxtotal, a + r)
3106 addtotal += a
3102 addtotal += a
3107 removetotal += r
3103 removetotal += r
3108 binary = binary or b
3104 binary = binary or b
3109
3105
3110 return maxfile, maxtotal, addtotal, removetotal, binary
3106 return maxfile, maxtotal, addtotal, removetotal, binary
3111
3107
3112
3108
3113 def diffstatdata(lines):
3109 def diffstatdata(lines):
3114 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
3110 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
3115
3111
3116 results = []
3112 results = []
3117 filename, adds, removes, isbinary = None, 0, 0, False
3113 filename, adds, removes, isbinary = None, 0, 0, False
3118
3114
3119 def addresult():
3115 def addresult():
3120 if filename:
3116 if filename:
3121 results.append((filename, adds, removes, isbinary))
3117 results.append((filename, adds, removes, isbinary))
3122
3118
3123 # inheader is used to track if a line is in the
3119 # inheader is used to track if a line is in the
3124 # header portion of the diff. This helps properly account
3120 # header portion of the diff. This helps properly account
3125 # for lines that start with '--' or '++'
3121 # for lines that start with '--' or '++'
3126 inheader = False
3122 inheader = False
3127
3123
3128 for line in lines:
3124 for line in lines:
3129 if line.startswith(b'diff'):
3125 if line.startswith(b'diff'):
3130 addresult()
3126 addresult()
3131 # starting a new file diff
3127 # starting a new file diff
3132 # set numbers to 0 and reset inheader
3128 # set numbers to 0 and reset inheader
3133 inheader = True
3129 inheader = True
3134 adds, removes, isbinary = 0, 0, False
3130 adds, removes, isbinary = 0, 0, False
3135 if line.startswith(b'diff --git a/'):
3131 if line.startswith(b'diff --git a/'):
3136 filename = gitre.search(line).group(2)
3132 filename = gitre.search(line).group(2)
3137 elif line.startswith(b'diff -r'):
3133 elif line.startswith(b'diff -r'):
3138 # format: "diff -r ... -r ... filename"
3134 # format: "diff -r ... -r ... filename"
3139 filename = diffre.search(line).group(1)
3135 filename = diffre.search(line).group(1)
3140 elif line.startswith(b'@@'):
3136 elif line.startswith(b'@@'):
3141 inheader = False
3137 inheader = False
3142 elif line.startswith(b'+') and not inheader:
3138 elif line.startswith(b'+') and not inheader:
3143 adds += 1
3139 adds += 1
3144 elif line.startswith(b'-') and not inheader:
3140 elif line.startswith(b'-') and not inheader:
3145 removes += 1
3141 removes += 1
3146 elif line.startswith(b'GIT binary patch') or line.startswith(
3142 elif line.startswith(b'GIT binary patch') or line.startswith(
3147 b'Binary file'
3143 b'Binary file'
3148 ):
3144 ):
3149 isbinary = True
3145 isbinary = True
3150 elif line.startswith(b'rename from'):
3146 elif line.startswith(b'rename from'):
3151 filename = line[12:]
3147 filename = line[12:]
3152 elif line.startswith(b'rename to'):
3148 elif line.startswith(b'rename to'):
3153 filename += b' => %s' % line[10:]
3149 filename += b' => %s' % line[10:]
3154 addresult()
3150 addresult()
3155 return results
3151 return results
3156
3152
3157
3153
3158 def diffstat(lines, width=80):
3154 def diffstat(lines, width=80):
3159 output = []
3155 output = []
3160 stats = diffstatdata(lines)
3156 stats = diffstatdata(lines)
3161 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
3157 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
3162
3158
3163 countwidth = len(str(maxtotal))
3159 countwidth = len(str(maxtotal))
3164 if hasbinary and countwidth < 3:
3160 if hasbinary and countwidth < 3:
3165 countwidth = 3
3161 countwidth = 3
3166 graphwidth = width - countwidth - maxname - 6
3162 graphwidth = width - countwidth - maxname - 6
3167 if graphwidth < 10:
3163 if graphwidth < 10:
3168 graphwidth = 10
3164 graphwidth = 10
3169
3165
3170 def scale(i):
3166 def scale(i):
3171 if maxtotal <= graphwidth:
3167 if maxtotal <= graphwidth:
3172 return i
3168 return i
3173 # If diffstat runs out of room it doesn't print anything,
3169 # If diffstat runs out of room it doesn't print anything,
3174 # which isn't very useful, so always print at least one + or -
3170 # which isn't very useful, so always print at least one + or -
3175 # if there were at least some changes.
3171 # if there were at least some changes.
3176 return max(i * graphwidth // maxtotal, int(bool(i)))
3172 return max(i * graphwidth // maxtotal, int(bool(i)))
3177
3173
3178 for filename, adds, removes, isbinary in stats:
3174 for filename, adds, removes, isbinary in stats:
3179 if isbinary:
3175 if isbinary:
3180 count = b'Bin'
3176 count = b'Bin'
3181 else:
3177 else:
3182 count = b'%d' % (adds + removes)
3178 count = b'%d' % (adds + removes)
3183 pluses = b'+' * scale(adds)
3179 pluses = b'+' * scale(adds)
3184 minuses = b'-' * scale(removes)
3180 minuses = b'-' * scale(removes)
3185 output.append(
3181 output.append(
3186 b' %s%s | %*s %s%s\n'
3182 b' %s%s | %*s %s%s\n'
3187 % (
3183 % (
3188 filename,
3184 filename,
3189 b' ' * (maxname - encoding.colwidth(filename)),
3185 b' ' * (maxname - encoding.colwidth(filename)),
3190 countwidth,
3186 countwidth,
3191 count,
3187 count,
3192 pluses,
3188 pluses,
3193 minuses,
3189 minuses,
3194 )
3190 )
3195 )
3191 )
3196
3192
3197 if stats:
3193 if stats:
3198 output.append(
3194 output.append(
3199 _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
3195 _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
3200 % (len(stats), totaladds, totalremoves)
3196 % (len(stats), totaladds, totalremoves)
3201 )
3197 )
3202
3198
3203 return b''.join(output)
3199 return b''.join(output)
3204
3200
3205
3201
3206 def diffstatui(*args, **kw):
3202 def diffstatui(*args, **kw):
3207 '''like diffstat(), but yields 2-tuples of (output, label) for
3203 '''like diffstat(), but yields 2-tuples of (output, label) for
3208 ui.write()
3204 ui.write()
3209 '''
3205 '''
3210
3206
3211 for line in diffstat(*args, **kw).splitlines():
3207 for line in diffstat(*args, **kw).splitlines():
3212 if line and line[-1] in b'+-':
3208 if line and line[-1] in b'+-':
3213 name, graph = line.rsplit(b' ', 1)
3209 name, graph = line.rsplit(b' ', 1)
3214 yield (name + b' ', b'')
3210 yield (name + b' ', b'')
3215 m = re.search(br'\++', graph)
3211 m = re.search(br'\++', graph)
3216 if m:
3212 if m:
3217 yield (m.group(0), b'diffstat.inserted')
3213 yield (m.group(0), b'diffstat.inserted')
3218 m = re.search(br'-+', graph)
3214 m = re.search(br'-+', graph)
3219 if m:
3215 if m:
3220 yield (m.group(0), b'diffstat.deleted')
3216 yield (m.group(0), b'diffstat.deleted')
3221 else:
3217 else:
3222 yield (line, b'')
3218 yield (line, b'')
3223 yield (b'\n', b'')
3219 yield (b'\n', b'')
General Comments 0
You need to be logged in to leave comments. Login now