##// END OF EJS Templates
patch: make diff --git to differentiate b/w file is empty or doesn't exists...
Sushil khanchi -
r47664:33350deb stable
parent child Browse files
Show More
@@ -1,3259 +1,3262 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import errno
14 import errno
15 import os
15 import os
16 import re
16 import re
17 import shutil
17 import shutil
18 import zlib
18 import zlib
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import (
21 from .node import (
22 hex,
22 hex,
23 nullhex,
23 short,
24 short,
24 )
25 )
25 from .pycompat import open
26 from .pycompat import open
26 from . import (
27 from . import (
27 copies,
28 copies,
28 diffhelper,
29 diffhelper,
29 diffutil,
30 diffutil,
30 encoding,
31 encoding,
31 error,
32 error,
32 mail,
33 mail,
33 mdiff,
34 mdiff,
34 pathutil,
35 pathutil,
35 pycompat,
36 pycompat,
36 scmutil,
37 scmutil,
37 similar,
38 similar,
38 util,
39 util,
39 vfs as vfsmod,
40 vfs as vfsmod,
40 )
41 )
41 from .utils import (
42 from .utils import (
42 dateutil,
43 dateutil,
43 hashutil,
44 hashutil,
44 procutil,
45 procutil,
45 stringutil,
46 stringutil,
46 )
47 )
47
48
48 stringio = util.stringio
49 stringio = util.stringio
49
50
50 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
51 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
52 wordsplitter = re.compile(
53 wordsplitter = re.compile(
53 br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
54 br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
54 )
55 )
55
56
56 PatchError = error.PatchError
57 PatchError = error.PatchError
57
58
58 # public functions
59 # public functions
59
60
60
61
61 def split(stream):
62 def split(stream):
62 '''return an iterator of individual patches from a stream'''
63 '''return an iterator of individual patches from a stream'''
63
64
64 def isheader(line, inheader):
65 def isheader(line, inheader):
65 if inheader and line.startswith((b' ', b'\t')):
66 if inheader and line.startswith((b' ', b'\t')):
66 # continuation
67 # continuation
67 return True
68 return True
68 if line.startswith((b' ', b'-', b'+')):
69 if line.startswith((b' ', b'-', b'+')):
69 # diff line - don't check for header pattern in there
70 # diff line - don't check for header pattern in there
70 return False
71 return False
71 l = line.split(b': ', 1)
72 l = line.split(b': ', 1)
72 return len(l) == 2 and b' ' not in l[0]
73 return len(l) == 2 and b' ' not in l[0]
73
74
74 def chunk(lines):
75 def chunk(lines):
75 return stringio(b''.join(lines))
76 return stringio(b''.join(lines))
76
77
77 def hgsplit(stream, cur):
78 def hgsplit(stream, cur):
78 inheader = True
79 inheader = True
79
80
80 for line in stream:
81 for line in stream:
81 if not line.strip():
82 if not line.strip():
82 inheader = False
83 inheader = False
83 if not inheader and line.startswith(b'# HG changeset patch'):
84 if not inheader and line.startswith(b'# HG changeset patch'):
84 yield chunk(cur)
85 yield chunk(cur)
85 cur = []
86 cur = []
86 inheader = True
87 inheader = True
87
88
88 cur.append(line)
89 cur.append(line)
89
90
90 if cur:
91 if cur:
91 yield chunk(cur)
92 yield chunk(cur)
92
93
93 def mboxsplit(stream, cur):
94 def mboxsplit(stream, cur):
94 for line in stream:
95 for line in stream:
95 if line.startswith(b'From '):
96 if line.startswith(b'From '):
96 for c in split(chunk(cur[1:])):
97 for c in split(chunk(cur[1:])):
97 yield c
98 yield c
98 cur = []
99 cur = []
99
100
100 cur.append(line)
101 cur.append(line)
101
102
102 if cur:
103 if cur:
103 for c in split(chunk(cur[1:])):
104 for c in split(chunk(cur[1:])):
104 yield c
105 yield c
105
106
106 def mimesplit(stream, cur):
107 def mimesplit(stream, cur):
107 def msgfp(m):
108 def msgfp(m):
108 fp = stringio()
109 fp = stringio()
109 g = mail.Generator(fp, mangle_from_=False)
110 g = mail.Generator(fp, mangle_from_=False)
110 g.flatten(m)
111 g.flatten(m)
111 fp.seek(0)
112 fp.seek(0)
112 return fp
113 return fp
113
114
114 for line in stream:
115 for line in stream:
115 cur.append(line)
116 cur.append(line)
116 c = chunk(cur)
117 c = chunk(cur)
117
118
118 m = mail.parse(c)
119 m = mail.parse(c)
119 if not m.is_multipart():
120 if not m.is_multipart():
120 yield msgfp(m)
121 yield msgfp(m)
121 else:
122 else:
122 ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
123 ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
123 for part in m.walk():
124 for part in m.walk():
124 ct = part.get_content_type()
125 ct = part.get_content_type()
125 if ct not in ok_types:
126 if ct not in ok_types:
126 continue
127 continue
127 yield msgfp(part)
128 yield msgfp(part)
128
129
129 def headersplit(stream, cur):
130 def headersplit(stream, cur):
130 inheader = False
131 inheader = False
131
132
132 for line in stream:
133 for line in stream:
133 if not inheader and isheader(line, inheader):
134 if not inheader and isheader(line, inheader):
134 yield chunk(cur)
135 yield chunk(cur)
135 cur = []
136 cur = []
136 inheader = True
137 inheader = True
137 if inheader and not isheader(line, inheader):
138 if inheader and not isheader(line, inheader):
138 inheader = False
139 inheader = False
139
140
140 cur.append(line)
141 cur.append(line)
141
142
142 if cur:
143 if cur:
143 yield chunk(cur)
144 yield chunk(cur)
144
145
145 def remainder(cur):
146 def remainder(cur):
146 yield chunk(cur)
147 yield chunk(cur)
147
148
148 class fiter(object):
149 class fiter(object):
149 def __init__(self, fp):
150 def __init__(self, fp):
150 self.fp = fp
151 self.fp = fp
151
152
152 def __iter__(self):
153 def __iter__(self):
153 return self
154 return self
154
155
155 def next(self):
156 def next(self):
156 l = self.fp.readline()
157 l = self.fp.readline()
157 if not l:
158 if not l:
158 raise StopIteration
159 raise StopIteration
159 return l
160 return l
160
161
161 __next__ = next
162 __next__ = next
162
163
163 inheader = False
164 inheader = False
164 cur = []
165 cur = []
165
166
166 mimeheaders = [b'content-type']
167 mimeheaders = [b'content-type']
167
168
168 if not util.safehasattr(stream, b'next'):
169 if not util.safehasattr(stream, b'next'):
169 # http responses, for example, have readline but not next
170 # http responses, for example, have readline but not next
170 stream = fiter(stream)
171 stream = fiter(stream)
171
172
172 for line in stream:
173 for line in stream:
173 cur.append(line)
174 cur.append(line)
174 if line.startswith(b'# HG changeset patch'):
175 if line.startswith(b'# HG changeset patch'):
175 return hgsplit(stream, cur)
176 return hgsplit(stream, cur)
176 elif line.startswith(b'From '):
177 elif line.startswith(b'From '):
177 return mboxsplit(stream, cur)
178 return mboxsplit(stream, cur)
178 elif isheader(line, inheader):
179 elif isheader(line, inheader):
179 inheader = True
180 inheader = True
180 if line.split(b':', 1)[0].lower() in mimeheaders:
181 if line.split(b':', 1)[0].lower() in mimeheaders:
181 # let email parser handle this
182 # let email parser handle this
182 return mimesplit(stream, cur)
183 return mimesplit(stream, cur)
183 elif line.startswith(b'--- ') and inheader:
184 elif line.startswith(b'--- ') and inheader:
184 # No evil headers seen by diff start, split by hand
185 # No evil headers seen by diff start, split by hand
185 return headersplit(stream, cur)
186 return headersplit(stream, cur)
186 # Not enough info, keep reading
187 # Not enough info, keep reading
187
188
188 # if we are here, we have a very plain patch
189 # if we are here, we have a very plain patch
189 return remainder(cur)
190 return remainder(cur)
190
191
191
192
192 ## Some facility for extensible patch parsing:
193 ## Some facility for extensible patch parsing:
193 # list of pairs ("header to match", "data key")
194 # list of pairs ("header to match", "data key")
194 patchheadermap = [
195 patchheadermap = [
195 (b'Date', b'date'),
196 (b'Date', b'date'),
196 (b'Branch', b'branch'),
197 (b'Branch', b'branch'),
197 (b'Node ID', b'nodeid'),
198 (b'Node ID', b'nodeid'),
198 ]
199 ]
199
200
200
201
201 @contextlib.contextmanager
202 @contextlib.contextmanager
202 def extract(ui, fileobj):
203 def extract(ui, fileobj):
203 """extract patch from data read from fileobj.
204 """extract patch from data read from fileobj.
204
205
205 patch can be a normal patch or contained in an email message.
206 patch can be a normal patch or contained in an email message.
206
207
207 return a dictionary. Standard keys are:
208 return a dictionary. Standard keys are:
208 - filename,
209 - filename,
209 - message,
210 - message,
210 - user,
211 - user,
211 - date,
212 - date,
212 - branch,
213 - branch,
213 - node,
214 - node,
214 - p1,
215 - p1,
215 - p2.
216 - p2.
216 Any item can be missing from the dictionary. If filename is missing,
217 Any item can be missing from the dictionary. If filename is missing,
217 fileobj did not contain a patch. Caller must unlink filename when done."""
218 fileobj did not contain a patch. Caller must unlink filename when done."""
218
219
219 fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
220 fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
220 tmpfp = os.fdopen(fd, 'wb')
221 tmpfp = os.fdopen(fd, 'wb')
221 try:
222 try:
222 yield _extract(ui, fileobj, tmpname, tmpfp)
223 yield _extract(ui, fileobj, tmpname, tmpfp)
223 finally:
224 finally:
224 tmpfp.close()
225 tmpfp.close()
225 os.unlink(tmpname)
226 os.unlink(tmpname)
226
227
227
228
228 def _extract(ui, fileobj, tmpname, tmpfp):
229 def _extract(ui, fileobj, tmpname, tmpfp):
229
230
230 # attempt to detect the start of a patch
231 # attempt to detect the start of a patch
231 # (this heuristic is borrowed from quilt)
232 # (this heuristic is borrowed from quilt)
232 diffre = re.compile(
233 diffre = re.compile(
233 br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
234 br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
234 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
235 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
235 br'---[ \t].*?^\+\+\+[ \t]|'
236 br'---[ \t].*?^\+\+\+[ \t]|'
236 br'\*\*\*[ \t].*?^---[ \t])',
237 br'\*\*\*[ \t].*?^---[ \t])',
237 re.MULTILINE | re.DOTALL,
238 re.MULTILINE | re.DOTALL,
238 )
239 )
239
240
240 data = {}
241 data = {}
241
242
242 msg = mail.parse(fileobj)
243 msg = mail.parse(fileobj)
243
244
244 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
245 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
245 data[b'user'] = msg['From'] and mail.headdecode(msg['From'])
246 data[b'user'] = msg['From'] and mail.headdecode(msg['From'])
246 if not subject and not data[b'user']:
247 if not subject and not data[b'user']:
247 # Not an email, restore parsed headers if any
248 # Not an email, restore parsed headers if any
248 subject = (
249 subject = (
249 b'\n'.join(
250 b'\n'.join(
250 b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
251 b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
251 )
252 )
252 + b'\n'
253 + b'\n'
253 )
254 )
254
255
255 # should try to parse msg['Date']
256 # should try to parse msg['Date']
256 parents = []
257 parents = []
257
258
258 nodeid = msg['X-Mercurial-Node']
259 nodeid = msg['X-Mercurial-Node']
259 if nodeid:
260 if nodeid:
260 data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
261 data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
261 ui.debug(b'Node ID: %s\n' % nodeid)
262 ui.debug(b'Node ID: %s\n' % nodeid)
262
263
263 if subject:
264 if subject:
264 if subject.startswith(b'[PATCH'):
265 if subject.startswith(b'[PATCH'):
265 pend = subject.find(b']')
266 pend = subject.find(b']')
266 if pend >= 0:
267 if pend >= 0:
267 subject = subject[pend + 1 :].lstrip()
268 subject = subject[pend + 1 :].lstrip()
268 subject = re.sub(br'\n[ \t]+', b' ', subject)
269 subject = re.sub(br'\n[ \t]+', b' ', subject)
269 ui.debug(b'Subject: %s\n' % subject)
270 ui.debug(b'Subject: %s\n' % subject)
270 if data[b'user']:
271 if data[b'user']:
271 ui.debug(b'From: %s\n' % data[b'user'])
272 ui.debug(b'From: %s\n' % data[b'user'])
272 diffs_seen = 0
273 diffs_seen = 0
273 ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
274 ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
274 message = b''
275 message = b''
275 for part in msg.walk():
276 for part in msg.walk():
276 content_type = pycompat.bytestr(part.get_content_type())
277 content_type = pycompat.bytestr(part.get_content_type())
277 ui.debug(b'Content-Type: %s\n' % content_type)
278 ui.debug(b'Content-Type: %s\n' % content_type)
278 if content_type not in ok_types:
279 if content_type not in ok_types:
279 continue
280 continue
280 payload = part.get_payload(decode=True)
281 payload = part.get_payload(decode=True)
281 m = diffre.search(payload)
282 m = diffre.search(payload)
282 if m:
283 if m:
283 hgpatch = False
284 hgpatch = False
284 hgpatchheader = False
285 hgpatchheader = False
285 ignoretext = False
286 ignoretext = False
286
287
287 ui.debug(b'found patch at byte %d\n' % m.start(0))
288 ui.debug(b'found patch at byte %d\n' % m.start(0))
288 diffs_seen += 1
289 diffs_seen += 1
289 cfp = stringio()
290 cfp = stringio()
290 for line in payload[: m.start(0)].splitlines():
291 for line in payload[: m.start(0)].splitlines():
291 if line.startswith(b'# HG changeset patch') and not hgpatch:
292 if line.startswith(b'# HG changeset patch') and not hgpatch:
292 ui.debug(b'patch generated by hg export\n')
293 ui.debug(b'patch generated by hg export\n')
293 hgpatch = True
294 hgpatch = True
294 hgpatchheader = True
295 hgpatchheader = True
295 # drop earlier commit message content
296 # drop earlier commit message content
296 cfp.seek(0)
297 cfp.seek(0)
297 cfp.truncate()
298 cfp.truncate()
298 subject = None
299 subject = None
299 elif hgpatchheader:
300 elif hgpatchheader:
300 if line.startswith(b'# User '):
301 if line.startswith(b'# User '):
301 data[b'user'] = line[7:]
302 data[b'user'] = line[7:]
302 ui.debug(b'From: %s\n' % data[b'user'])
303 ui.debug(b'From: %s\n' % data[b'user'])
303 elif line.startswith(b"# Parent "):
304 elif line.startswith(b"# Parent "):
304 parents.append(line[9:].lstrip())
305 parents.append(line[9:].lstrip())
305 elif line.startswith(b"# "):
306 elif line.startswith(b"# "):
306 for header, key in patchheadermap:
307 for header, key in patchheadermap:
307 prefix = b'# %s ' % header
308 prefix = b'# %s ' % header
308 if line.startswith(prefix):
309 if line.startswith(prefix):
309 data[key] = line[len(prefix) :]
310 data[key] = line[len(prefix) :]
310 ui.debug(b'%s: %s\n' % (header, data[key]))
311 ui.debug(b'%s: %s\n' % (header, data[key]))
311 else:
312 else:
312 hgpatchheader = False
313 hgpatchheader = False
313 elif line == b'---':
314 elif line == b'---':
314 ignoretext = True
315 ignoretext = True
315 if not hgpatchheader and not ignoretext:
316 if not hgpatchheader and not ignoretext:
316 cfp.write(line)
317 cfp.write(line)
317 cfp.write(b'\n')
318 cfp.write(b'\n')
318 message = cfp.getvalue()
319 message = cfp.getvalue()
319 if tmpfp:
320 if tmpfp:
320 tmpfp.write(payload)
321 tmpfp.write(payload)
321 if not payload.endswith(b'\n'):
322 if not payload.endswith(b'\n'):
322 tmpfp.write(b'\n')
323 tmpfp.write(b'\n')
323 elif not diffs_seen and message and content_type == b'text/plain':
324 elif not diffs_seen and message and content_type == b'text/plain':
324 message += b'\n' + payload
325 message += b'\n' + payload
325
326
326 if subject and not message.startswith(subject):
327 if subject and not message.startswith(subject):
327 message = b'%s\n%s' % (subject, message)
328 message = b'%s\n%s' % (subject, message)
328 data[b'message'] = message
329 data[b'message'] = message
329 tmpfp.close()
330 tmpfp.close()
330 if parents:
331 if parents:
331 data[b'p1'] = parents.pop(0)
332 data[b'p1'] = parents.pop(0)
332 if parents:
333 if parents:
333 data[b'p2'] = parents.pop(0)
334 data[b'p2'] = parents.pop(0)
334
335
335 if diffs_seen:
336 if diffs_seen:
336 data[b'filename'] = tmpname
337 data[b'filename'] = tmpname
337
338
338 return data
339 return data
339
340
340
341
341 class patchmeta(object):
342 class patchmeta(object):
342 """Patched file metadata
343 """Patched file metadata
343
344
344 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
345 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
345 or COPY. 'path' is patched file path. 'oldpath' is set to the
346 or COPY. 'path' is patched file path. 'oldpath' is set to the
346 origin file when 'op' is either COPY or RENAME, None otherwise. If
347 origin file when 'op' is either COPY or RENAME, None otherwise. If
347 file mode is changed, 'mode' is a tuple (islink, isexec) where
348 file mode is changed, 'mode' is a tuple (islink, isexec) where
348 'islink' is True if the file is a symlink and 'isexec' is True if
349 'islink' is True if the file is a symlink and 'isexec' is True if
349 the file is executable. Otherwise, 'mode' is None.
350 the file is executable. Otherwise, 'mode' is None.
350 """
351 """
351
352
352 def __init__(self, path):
353 def __init__(self, path):
353 self.path = path
354 self.path = path
354 self.oldpath = None
355 self.oldpath = None
355 self.mode = None
356 self.mode = None
356 self.op = b'MODIFY'
357 self.op = b'MODIFY'
357 self.binary = False
358 self.binary = False
358
359
359 def setmode(self, mode):
360 def setmode(self, mode):
360 islink = mode & 0o20000
361 islink = mode & 0o20000
361 isexec = mode & 0o100
362 isexec = mode & 0o100
362 self.mode = (islink, isexec)
363 self.mode = (islink, isexec)
363
364
364 def copy(self):
365 def copy(self):
365 other = patchmeta(self.path)
366 other = patchmeta(self.path)
366 other.oldpath = self.oldpath
367 other.oldpath = self.oldpath
367 other.mode = self.mode
368 other.mode = self.mode
368 other.op = self.op
369 other.op = self.op
369 other.binary = self.binary
370 other.binary = self.binary
370 return other
371 return other
371
372
372 def _ispatchinga(self, afile):
373 def _ispatchinga(self, afile):
373 if afile == b'/dev/null':
374 if afile == b'/dev/null':
374 return self.op == b'ADD'
375 return self.op == b'ADD'
375 return afile == b'a/' + (self.oldpath or self.path)
376 return afile == b'a/' + (self.oldpath or self.path)
376
377
377 def _ispatchingb(self, bfile):
378 def _ispatchingb(self, bfile):
378 if bfile == b'/dev/null':
379 if bfile == b'/dev/null':
379 return self.op == b'DELETE'
380 return self.op == b'DELETE'
380 return bfile == b'b/' + self.path
381 return bfile == b'b/' + self.path
381
382
382 def ispatching(self, afile, bfile):
383 def ispatching(self, afile, bfile):
383 return self._ispatchinga(afile) and self._ispatchingb(bfile)
384 return self._ispatchinga(afile) and self._ispatchingb(bfile)
384
385
385 def __repr__(self):
386 def __repr__(self):
386 return "<patchmeta %s %r>" % (self.op, self.path)
387 return "<patchmeta %s %r>" % (self.op, self.path)
387
388
388
389
389 def readgitpatch(lr):
390 def readgitpatch(lr):
390 """extract git-style metadata about patches from <patchname>"""
391 """extract git-style metadata about patches from <patchname>"""
391
392
392 # Filter patch for git information
393 # Filter patch for git information
393 gp = None
394 gp = None
394 gitpatches = []
395 gitpatches = []
395 for line in lr:
396 for line in lr:
396 line = line.rstrip(b'\r\n')
397 line = line.rstrip(b'\r\n')
397 if line.startswith(b'diff --git a/'):
398 if line.startswith(b'diff --git a/'):
398 m = gitre.match(line)
399 m = gitre.match(line)
399 if m:
400 if m:
400 if gp:
401 if gp:
401 gitpatches.append(gp)
402 gitpatches.append(gp)
402 dst = m.group(2)
403 dst = m.group(2)
403 gp = patchmeta(dst)
404 gp = patchmeta(dst)
404 elif gp:
405 elif gp:
405 if line.startswith(b'--- '):
406 if line.startswith(b'--- '):
406 gitpatches.append(gp)
407 gitpatches.append(gp)
407 gp = None
408 gp = None
408 continue
409 continue
409 if line.startswith(b'rename from '):
410 if line.startswith(b'rename from '):
410 gp.op = b'RENAME'
411 gp.op = b'RENAME'
411 gp.oldpath = line[12:]
412 gp.oldpath = line[12:]
412 elif line.startswith(b'rename to '):
413 elif line.startswith(b'rename to '):
413 gp.path = line[10:]
414 gp.path = line[10:]
414 elif line.startswith(b'copy from '):
415 elif line.startswith(b'copy from '):
415 gp.op = b'COPY'
416 gp.op = b'COPY'
416 gp.oldpath = line[10:]
417 gp.oldpath = line[10:]
417 elif line.startswith(b'copy to '):
418 elif line.startswith(b'copy to '):
418 gp.path = line[8:]
419 gp.path = line[8:]
419 elif line.startswith(b'deleted file'):
420 elif line.startswith(b'deleted file'):
420 gp.op = b'DELETE'
421 gp.op = b'DELETE'
421 elif line.startswith(b'new file mode '):
422 elif line.startswith(b'new file mode '):
422 gp.op = b'ADD'
423 gp.op = b'ADD'
423 gp.setmode(int(line[-6:], 8))
424 gp.setmode(int(line[-6:], 8))
424 elif line.startswith(b'new mode '):
425 elif line.startswith(b'new mode '):
425 gp.setmode(int(line[-6:], 8))
426 gp.setmode(int(line[-6:], 8))
426 elif line.startswith(b'GIT binary patch'):
427 elif line.startswith(b'GIT binary patch'):
427 gp.binary = True
428 gp.binary = True
428 if gp:
429 if gp:
429 gitpatches.append(gp)
430 gitpatches.append(gp)
430
431
431 return gitpatches
432 return gitpatches
432
433
433
434
434 class linereader(object):
435 class linereader(object):
435 # simple class to allow pushing lines back into the input stream
436 # simple class to allow pushing lines back into the input stream
436 def __init__(self, fp):
437 def __init__(self, fp):
437 self.fp = fp
438 self.fp = fp
438 self.buf = []
439 self.buf = []
439
440
440 def push(self, line):
441 def push(self, line):
441 if line is not None:
442 if line is not None:
442 self.buf.append(line)
443 self.buf.append(line)
443
444
444 def readline(self):
445 def readline(self):
445 if self.buf:
446 if self.buf:
446 l = self.buf[0]
447 l = self.buf[0]
447 del self.buf[0]
448 del self.buf[0]
448 return l
449 return l
449 return self.fp.readline()
450 return self.fp.readline()
450
451
451 def __iter__(self):
452 def __iter__(self):
452 return iter(self.readline, b'')
453 return iter(self.readline, b'')
453
454
454
455
455 class abstractbackend(object):
456 class abstractbackend(object):
456 def __init__(self, ui):
457 def __init__(self, ui):
457 self.ui = ui
458 self.ui = ui
458
459
459 def getfile(self, fname):
460 def getfile(self, fname):
460 """Return target file data and flags as a (data, (islink,
461 """Return target file data and flags as a (data, (islink,
461 isexec)) tuple. Data is None if file is missing/deleted.
462 isexec)) tuple. Data is None if file is missing/deleted.
462 """
463 """
463 raise NotImplementedError
464 raise NotImplementedError
464
465
465 def setfile(self, fname, data, mode, copysource):
466 def setfile(self, fname, data, mode, copysource):
466 """Write data to target file fname and set its mode. mode is a
467 """Write data to target file fname and set its mode. mode is a
467 (islink, isexec) tuple. If data is None, the file content should
468 (islink, isexec) tuple. If data is None, the file content should
468 be left unchanged. If the file is modified after being copied,
469 be left unchanged. If the file is modified after being copied,
469 copysource is set to the original file name.
470 copysource is set to the original file name.
470 """
471 """
471 raise NotImplementedError
472 raise NotImplementedError
472
473
473 def unlink(self, fname):
474 def unlink(self, fname):
474 """Unlink target file."""
475 """Unlink target file."""
475 raise NotImplementedError
476 raise NotImplementedError
476
477
477 def writerej(self, fname, failed, total, lines):
478 def writerej(self, fname, failed, total, lines):
478 """Write rejected lines for fname. total is the number of hunks
479 """Write rejected lines for fname. total is the number of hunks
479 which failed to apply and total the total number of hunks for this
480 which failed to apply and total the total number of hunks for this
480 files.
481 files.
481 """
482 """
482
483
483 def exists(self, fname):
484 def exists(self, fname):
484 raise NotImplementedError
485 raise NotImplementedError
485
486
486 def close(self):
487 def close(self):
487 raise NotImplementedError
488 raise NotImplementedError
488
489
489
490
490 class fsbackend(abstractbackend):
491 class fsbackend(abstractbackend):
491 def __init__(self, ui, basedir):
492 def __init__(self, ui, basedir):
492 super(fsbackend, self).__init__(ui)
493 super(fsbackend, self).__init__(ui)
493 self.opener = vfsmod.vfs(basedir)
494 self.opener = vfsmod.vfs(basedir)
494
495
495 def getfile(self, fname):
496 def getfile(self, fname):
496 if self.opener.islink(fname):
497 if self.opener.islink(fname):
497 return (self.opener.readlink(fname), (True, False))
498 return (self.opener.readlink(fname), (True, False))
498
499
499 isexec = False
500 isexec = False
500 try:
501 try:
501 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
502 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
502 except OSError as e:
503 except OSError as e:
503 if e.errno != errno.ENOENT:
504 if e.errno != errno.ENOENT:
504 raise
505 raise
505 try:
506 try:
506 return (self.opener.read(fname), (False, isexec))
507 return (self.opener.read(fname), (False, isexec))
507 except IOError as e:
508 except IOError as e:
508 if e.errno != errno.ENOENT:
509 if e.errno != errno.ENOENT:
509 raise
510 raise
510 return None, None
511 return None, None
511
512
512 def setfile(self, fname, data, mode, copysource):
513 def setfile(self, fname, data, mode, copysource):
513 islink, isexec = mode
514 islink, isexec = mode
514 if data is None:
515 if data is None:
515 self.opener.setflags(fname, islink, isexec)
516 self.opener.setflags(fname, islink, isexec)
516 return
517 return
517 if islink:
518 if islink:
518 self.opener.symlink(data, fname)
519 self.opener.symlink(data, fname)
519 else:
520 else:
520 self.opener.write(fname, data)
521 self.opener.write(fname, data)
521 if isexec:
522 if isexec:
522 self.opener.setflags(fname, False, True)
523 self.opener.setflags(fname, False, True)
523
524
524 def unlink(self, fname):
525 def unlink(self, fname):
525 rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
526 rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
526 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
527 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
527
528
528 def writerej(self, fname, failed, total, lines):
529 def writerej(self, fname, failed, total, lines):
529 fname = fname + b".rej"
530 fname = fname + b".rej"
530 self.ui.warn(
531 self.ui.warn(
531 _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
532 _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
532 % (failed, total, fname)
533 % (failed, total, fname)
533 )
534 )
534 fp = self.opener(fname, b'w')
535 fp = self.opener(fname, b'w')
535 fp.writelines(lines)
536 fp.writelines(lines)
536 fp.close()
537 fp.close()
537
538
538 def exists(self, fname):
539 def exists(self, fname):
539 return self.opener.lexists(fname)
540 return self.opener.lexists(fname)
540
541
541
542
542 class workingbackend(fsbackend):
543 class workingbackend(fsbackend):
543 def __init__(self, ui, repo, similarity):
544 def __init__(self, ui, repo, similarity):
544 super(workingbackend, self).__init__(ui, repo.root)
545 super(workingbackend, self).__init__(ui, repo.root)
545 self.repo = repo
546 self.repo = repo
546 self.similarity = similarity
547 self.similarity = similarity
547 self.removed = set()
548 self.removed = set()
548 self.changed = set()
549 self.changed = set()
549 self.copied = []
550 self.copied = []
550
551
551 def _checkknown(self, fname):
552 def _checkknown(self, fname):
552 if self.repo.dirstate[fname] == b'?' and self.exists(fname):
553 if self.repo.dirstate[fname] == b'?' and self.exists(fname):
553 raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
554 raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
554
555
555 def setfile(self, fname, data, mode, copysource):
556 def setfile(self, fname, data, mode, copysource):
556 self._checkknown(fname)
557 self._checkknown(fname)
557 super(workingbackend, self).setfile(fname, data, mode, copysource)
558 super(workingbackend, self).setfile(fname, data, mode, copysource)
558 if copysource is not None:
559 if copysource is not None:
559 self.copied.append((copysource, fname))
560 self.copied.append((copysource, fname))
560 self.changed.add(fname)
561 self.changed.add(fname)
561
562
562 def unlink(self, fname):
563 def unlink(self, fname):
563 self._checkknown(fname)
564 self._checkknown(fname)
564 super(workingbackend, self).unlink(fname)
565 super(workingbackend, self).unlink(fname)
565 self.removed.add(fname)
566 self.removed.add(fname)
566 self.changed.add(fname)
567 self.changed.add(fname)
567
568
568 def close(self):
569 def close(self):
569 wctx = self.repo[None]
570 wctx = self.repo[None]
570 changed = set(self.changed)
571 changed = set(self.changed)
571 for src, dst in self.copied:
572 for src, dst in self.copied:
572 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
573 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
573 if self.removed:
574 if self.removed:
574 wctx.forget(sorted(self.removed))
575 wctx.forget(sorted(self.removed))
575 for f in self.removed:
576 for f in self.removed:
576 if f not in self.repo.dirstate:
577 if f not in self.repo.dirstate:
577 # File was deleted and no longer belongs to the
578 # File was deleted and no longer belongs to the
578 # dirstate, it was probably marked added then
579 # dirstate, it was probably marked added then
579 # deleted, and should not be considered by
580 # deleted, and should not be considered by
580 # marktouched().
581 # marktouched().
581 changed.discard(f)
582 changed.discard(f)
582 if changed:
583 if changed:
583 scmutil.marktouched(self.repo, changed, self.similarity)
584 scmutil.marktouched(self.repo, changed, self.similarity)
584 return sorted(self.changed)
585 return sorted(self.changed)
585
586
586
587
class filestore(object):
    """Hold patched file contents in memory, spilling to a temporary
    directory on disk once an overall in-memory size budget is exceeded.

    Entries are (data, mode, copied) triples keyed by file name.
    """

    def __init__(self, maxsize=None):
        # maxsize is the in-memory budget in bytes; a negative value
        # disables spilling entirely. Defaults to 4 MiB.
        if maxsize is None:
            maxsize = 4 * (2 ** 20)
        self.opener = None  # lazily-created vfs over the spill directory
        self.files = {}  # fname -> (on-disk name, mode, copied)
        self.created = 0  # counter used to generate on-disk names
        self.maxsize = maxsize
        self.size = 0  # bytes currently held in memory
        self.data = {}  # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        """Record the patched content and mode of fname."""
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            tmproot = pycompat.mkdtemp(prefix=b'hg-patch-')
            self.opener = vfsmod.vfs(tmproot)
        # Avoid filename issues with these simple names
        ondisk = b'%d' % self.created
        self.created += 1
        self.opener.write(ondisk, data)
        self.files[fname] = (ondisk, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for fname, or (None, None, None)
        when the file was never stored."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            ondisk, mode, copied = self.files[fname]
            return self.opener.read(ondisk), mode, copied
        return None, None, None

    def close(self):
        """Remove the on-disk spill directory, if one was created."""
        if self.opener:
            shutil.rmtree(self.opener.base)
623
624
624
625
class repobackend(abstractbackend):
    """Backend that applies patches against a repository changectx,
    buffering the results in a filestore instead of touching the
    working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Patching only makes sense for files tracked in the context.
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None)
        when fname is not in the context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = b'l' in flags
        isexec = b'x' in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Stage patched content for fname into the filestore."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by the patch."""
        return self.changed | self.removed
666
667
667
668
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# range line of a context-format hunk: "--- start[,len] ----" style markers
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes (see patchfile.writelines)
eolmodes = [b'strict', b'crlf', b'lf', b'auto']
672
673
673
674
class patchfile(object):
    """State and logic for applying the hunks of one patched file.

    Tracks the target file's current lines, the cumulative line offset
    introduced by already-applied hunks, and the skew observed when a
    hunk only applied at a shifted position. Failed hunks accumulate in
    self.rej and are written out as a .rej file on close().
    """

    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
        # gp is a gitpatch-style header object: path, mode, oldpath, op
        # (presumably ADD/COPY/RENAME/MODIFY/DELETE -- confirm in parser)
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the existing file
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
        self.remove = gp.op == b'DELETE'
        # Copies/renames read their base content from the store, not the
        # backend, so in-flight patched content is picked up.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith(b'\r\n'):
                    self.eol = b'\r\n'
                elif self.lines[0].endswith(b'\n'):
                    self.eol = b'\n'
                if eolmode != b'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith(b'\r\n'):
                            l = l[:-2] + b'\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    b"(use '--prefix' to apply patch relative to the "
                    b"current directory)\n"
                )
            )

        self.hash = {}  # line content -> list of line numbers, for fuzzing
        self.dirty = 0
        self.offset = 0  # net lines added/removed by applied hunks so far
        self.skew = 0  # positional drift observed in the last fuzzy match
        self.rej = []  # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to the backend, restoring the requested EOL style.

        Lines are normalized to '\\n' internally; non-strict eolmodes
        convert them back here before handing off to the backend.
        """
        if self.eolmode == b'auto':
            eol = self.eol
        elif self.eolmode == b'crlf':
            eol = b'\r\n'
        else:
            eol = b'\n'

        if self.eolmode != b'strict' and eol and eol != b'\n':
            rawlines = []
            for l in lines:
                if l and l.endswith(b'\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print "patching file X" once; as a warning if warn is set,
        as a note (verbose-only) otherwise."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _(b"patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = [b"--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != b'\n':
                    # preserve "\ No newline at end of file" information
                    lines.append(b'\n' + diffhelper.MISSING_NEWLINE_MARKER)
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk to the in-memory lines.

        Returns 0 on a clean application, the fuzz level used on a
        fuzzy application, and -1 when the hunk is rejected (it is then
        appended to self.rej).
        """
        if not h.complete():
            raise PatchError(
                _(b"bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _(b"cannot create %s: destination already exists\n")
                    % self.fname
                )
            else:
                self.ui.warn(_(b"file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # binary hunks replace (or delete) the whole file at once
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (
            self.eolmode in (b'crlf', b'lf')
            or self.eolmode == b'auto'
            and self.eol
        ):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # try increasing fuzz levels, trimming context from the top only
        # first, then from both ends
        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush modified lines and any rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
905
906
906
907
class header(object):
    """One file header of a parsed patch, plus its hunks.

    self.header is the list of raw header lines; self.hunks is filled
    in by the parser with the recordhunks belonging to this file.
    """

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when the header announces binary content."""
        return any(line.startswith(b'index ') for line in self.header)

    def pretty(self, fp):
        """Write a human-oriented summary of this header to fp."""
        for line in self.header:
            if line.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                return
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                return
            if line.startswith(b'---'):
                nchanged = sum(max(h.added, h.removed) for h in self.hunks)
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (len(self.hunks), nchanged)
                )
                return
            fp.write(line)

    def write(self, fp):
        """Write the raw header lines to fp."""
        fp.write(b''.join(self.header))

    def allhunks(self):
        """True when the file must be taken or left as a whole."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the file(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        return [fromfile] if fromfile == tofile else [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (
            ' '.join(pycompat.rapply(pycompat.fsdecode, self.files()))
        )

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
985
986
986
987
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        # Trim context lines down to maxcontext, keeping the lines
        # nearest the change: the tail of `before`, the head of `after`.
        def trimcontext(lines, reverse=False):
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # start positions shift by however much leading context was dropped
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        # NOTE: equality deliberately ignores before/after context and
        # toline; two hunks with the same change at the same origin in
        # the same file compare equal.
        if not isinstance(v, recordhunk):
            return False

        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        # must stay consistent with __eq__ above
        return hash(
            (
                tuple(self.hunk),
                tuple(self.header.files()),
                self.fromline,
                self.proc,
            )
        )

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith(b'+')])
        rem = len([h for h in hunk if h.startswith(b'-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        # b'\\' maps to itself so "\ No newline" marker lines survive
        m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        hunk = [b'%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            hunk,
            self.after,
        )

    def write(self, fp):
        """Write this hunk to fp in unified-diff format."""
        delta = len(self.before) + len(self.after)
        # the missing-newline marker is not a real context line
        if self.after and self.after[-1] == diffhelper.MISSING_NEWLINE_MARKER:
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                fromlen,
                self.toline,
                tolen,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    @encoding.strmethod
    def __repr__(self):
        return b'<hunk %r@%d>' % (self.filename(), self.fromline)
1095
1096
1096
1097
def getmessages():
    """Return the prompt and help message tables used by filterpatch().

    The outer keys select a section:
    - b'multiple': per-change prompts used when a file has several changes
    - b'single':   per-change prompts used when a file has exactly one change
    - b'help':     the ui.promptchoice() response strings ('$$'-separated)
    Each section is keyed by operation: apply/discard/keep/record.
    """
    return {
        b'multiple': {
            b'apply': _(b"apply change %d/%d to '%s'?"),
            b'discard': _(b"discard change %d/%d to '%s'?"),
            b'keep': _(b"keep change %d/%d to '%s'?"),
            b'record': _(b"record change %d/%d to '%s'?"),
        },
        b'single': {
            b'apply': _(b"apply this change to '%s'?"),
            b'discard': _(b"discard this change to '%s'?"),
            b'keep': _(b"keep this change to '%s'?"),
            b'record': _(b"record this change to '%s'?"),
        },
        b'help': {
            b'apply': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, apply this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Apply remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Apply &all changes to all remaining files'
                b'$$ &Quit, applying no changes'
                b'$$ &? (display help)'
            ),
            b'discard': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, discard this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Discard remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Discard &all changes to all remaining files'
                b'$$ &Quit, discarding no changes'
                b'$$ &? (display help)'
            ),
            b'keep': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, keep this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Keep remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Keep &all changes to all remaining files'
                b'$$ &Quit, keeping all changes'
                b'$$ &? (display help)'
            ),
            b'record': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, record this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Record remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Record &all changes to all remaining files'
                b'$$ &Quit, recording no changes'
                b'$$ &? (display help)'
            ),
        },
    }
1162
1163
1163
1164
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of header objects (each carrying its hunks); match is
    a matcher used to auto-accept files that were named exactly on the
    command line; operation selects the prompt wording (defaults to
    b'record').

    Returns a 2-tuple: (flattened list of kept headers and hunks, {}).
    Only headers that are special() or kept at least one hunk survive.
    """
    messages = getmessages()

    if operation is None:
        operation = b'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # skipfile/skipall are tri-state: None means "still asking"; once
        # set (True/False) the stored answer is replayed without prompting.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages[b'help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
            ui.write(b"\n")
            if r == 8:  # ?
                # display one "key - description" line per choice, then re-ask
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0:  # yes
                ret = True
            elif r == 1:  # no
                ret = False
            elif r == 2:  # Edit patch
                # editing is only possible for a concrete non-binary hunk
                if chunk is None:
                    ui.write(_(b'cannot edit patch for whole file'))
                    ui.write(b"\n")
                    continue
                if chunk.header.binary():
                    ui.write(_(b'cannot edit patch for binary file'))
                    ui.write(b"\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = b'---' + _(
                    """
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = pycompat.mkstemp(
                    prefix=b"hg-editor-", suffix=b".diff"
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, 'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    # append the help text as '#' comment lines
                    f.write(
                        b''.join(
                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
                        )
                    )
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        b"%s \"%s\"" % (editor, patchfn),
                        environ={b'HGUSER': ui.username()},
                        blockedtag=b'filterpatch',
                    )
                    if ret != 0:
                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, 'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith(b'#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    # re-parse the edited text into fresh header/hunk objects
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3:  # Skip
                ret = skipfile = False
            elif r == 4:  # file (Record remaining)
                ret = skipfile = True
            elif r == 5:  # done, skip remaining
                ret = skipall = False
            elif r == 6:  # all
                ret = skipall = True
            elif r == 7:  # quit
                raise error.CanceledError(_(b'user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None  # per-file answer resets for each header
        fixoffset = 0  # cumulative line-count drift from rejected hunks
        hdr = b''.join(h.header)
        if hdr in seen:
            # identical header already processed; don't prompt twice
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = _(b'examine changes to %s?') % _(b' and ').join(
            b"'%s'" % f for f in files
        )
        # files named exactly on the command line are accepted without asking
        if all(match.exact(f) for f in files):
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # e.g. binary or rename-only headers: take everything wholesale
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages[b'single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages[b'multiple'][operation] % (
                    idx,
                    total,
                    chunk.filename(),
                )
            r, skipfile, skipall, newpatches = prompt(
                skipfile, skipall, msg, chunk
            )
            if r:
                if fixoffset:
                    # shift the hunk's target line to account for earlier
                    # rejected hunks; copy so the original stays untouched
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: keep the re-parsed hunks instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # rejected hunk: later hunks must shift by its net line delta
                fixoffset += chunk.removed - chunk.added
    return (
        sum(
            [
                h
                for h in pycompat.itervalues(applied)
                if h[0].special() or len(h) > 1
            ],
            [],
        ),
        {},
    )
class hunk(object):
    """A single hunk of a text patch (unified or context format).

    Attributes populated by parsing:
      a / b          -- old/new side lines (a-lines keep their '-'/' ' prefix)
      hunk           -- raw hunk lines, starting with the @@/*** description
      starta, lena   -- old-file start line and length
      startb, lenb   -- new-file start line and length
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk description line; num: 1-based hunk number (for
        # error messages); lr: linereader to pull body lines from (None for
        # a dummy shell, see getnormalized); context: True for context-diff
        # format, False for unified.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # turn trailing CRLF into LF; other bytes untouched
            nlines = []
            for line in lines:
                if line.endswith(b'\r\n'):
                    line = line[:-2] + b'\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader lr.

        Raises PatchError if the @@ description or the body is malformed.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in "@@ -l +l @@" means 1 line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(
                lr, self.hunk, self.lena, self.lenb, self.a, self.b
            )
        except error.ParseError as e:
            raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and convert it to unified form.

        Reads the old-side block ('*** l,l ****') then the new-side block
        ('--- l,l ----'), merging them into self.hunk as unified '+'/'-'/' '
        lines and rewriting self.desc as a '@@ ... @@' line.
        Raises PatchError on malformed input.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side block: '- ' removals and '! ' changes become '-' lines
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith(b'---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'- ') or l.startswith(b'! '):
                u = b'-' + s
            elif l.startswith(b' '):
                u = b' ' + s
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file': strip the newline we stored
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # new-side block: merge '+'/' ' lines into self.hunk, keeping the
        # '-' lines already placed there in order; hunki tracks the merge
        # position inside self.hunk
        hunki = 1
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'+ ') or l.startswith(b'! '):
                u = b'+' + s
            elif l.startswith(b' '):
                u = b' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = b""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    # already present (a context line shared with old side)
                    break
                elif h.startswith(b'-'):
                    # skip over removal lines from the old side
                    continue
                else:
                    # insert the new-side line before the non-matching line
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith(b'-') or x.startswith(b' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith(b'+') or x.startswith(b' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker if present;
        # otherwise push the peeked line back for the next reader
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides hold exactly the advertised number of lines
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(b' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(b' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to `fuzz` context
        lines trimmed from each end (top only when toponly is True), and the
        start offsets adjusted to match."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
class binhunk(object):
    """A binary patch file."""

    def __init__(self, lr, fname):
        # lr: linereader positioned after the 'GIT binary patch' line;
        # fname: file name, used only in error messages.
        self.text = None  # decoded payload (full text or delta), set by _read
        self.delta = False  # True when the payload is a delta, not a literal
        self.hunk = [b'GIT binary patch\n']  # raw hunk lines as read
        self._fname = fname
        self._read(lr)

    def complete(self):
        # parsing succeeded iff _read stored the decoded payload
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a one-element list.

        For a delta hunk, `lines` is the old content the delta is applied to.
        """
        if self.delta:
            return [applybindelta(self.text, b''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse the base85 body, inflate it, and store it in self.text.

        Raises PatchError when the header is missing, a line cannot be
        decoded, or the inflated size does not match the declared size.
        """

        def getline(lr, hunk):
            # read one raw line, record it in hunk, return it without EOL
            l = lr.readline()
            hunk.append(l)
            return l.rstrip(b'\r\n')

        # find the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(
                    _(b'could not extract "%s" binary data') % self._fname
                )
            if line.startswith(b'literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith(b'delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of this line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0:1]
            if l <= b'Z' and l >= b'A':
                l = ord(l) - ord(b'A') + 1
            else:
                l = ord(l) - ord(b'a') + 27
            try:
                # base85-decode the rest and trim padding to the real length
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(
                    _(b'could not decode "%s" binary patch: %s')
                    % (self._fname, stringutil.forcebytestr(e))
                )
            line = getline(lr, self.hunk)
        # the concatenated payload is zlib-compressed
        text = zlib.decompress(b''.join(dec))
        if len(text) != size:
            raise PatchError(
                _(b'"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' diff header line.

    The 4-byte marker prefix is dropped, trailing EOL bytes are stripped,
    and anything from the first tab -- or, failing that, the first space --
    onward (typically a timestamp) is discarded.
    """
    trimmed = str[4:].rstrip(b'\r\n')
    # a tab separator takes precedence over a space separator
    for sep in (b'\t', b' '):
        cut = trimmed.find(sep)
        if cut >= 0:
            return trimmed[:cut]
    return trimmed
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # items that know how to reverse themselves (hunks) are replaced by
    # their reversed counterpart; everything else (headers) passes through
    return [
        item.reversehunk() if util.safehasattr(item, b'reversehunk') else item
        for item in hunks
    ]
1711 def parsepatch(originalchunks, maxcontext=None):
1712 def parsepatch(originalchunks, maxcontext=None):
1712 """patch -> [] of headers -> [] of hunks
1713 """patch -> [] of headers -> [] of hunks
1713
1714
1714 If maxcontext is not None, trim context lines if necessary.
1715 If maxcontext is not None, trim context lines if necessary.
1715
1716
1716 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1717 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1717 ... --- a/folder1/g
1718 ... --- a/folder1/g
1718 ... +++ b/folder1/g
1719 ... +++ b/folder1/g
1719 ... @@ -1,8 +1,10 @@
1720 ... @@ -1,8 +1,10 @@
1720 ... 1
1721 ... 1
1721 ... 2
1722 ... 2
1722 ... -3
1723 ... -3
1723 ... 4
1724 ... 4
1724 ... 5
1725 ... 5
1725 ... 6
1726 ... 6
1726 ... +6.1
1727 ... +6.1
1727 ... +6.2
1728 ... +6.2
1728 ... 7
1729 ... 7
1729 ... 8
1730 ... 8
1730 ... +9'''
1731 ... +9'''
1731 >>> out = util.stringio()
1732 >>> out = util.stringio()
1732 >>> headers = parsepatch([rawpatch], maxcontext=1)
1733 >>> headers = parsepatch([rawpatch], maxcontext=1)
1733 >>> for header in headers:
1734 >>> for header in headers:
1734 ... header.write(out)
1735 ... header.write(out)
1735 ... for hunk in header.hunks:
1736 ... for hunk in header.hunks:
1736 ... hunk.write(out)
1737 ... hunk.write(out)
1737 >>> print(pycompat.sysstr(out.getvalue()))
1738 >>> print(pycompat.sysstr(out.getvalue()))
1738 diff --git a/folder1/g b/folder1/g
1739 diff --git a/folder1/g b/folder1/g
1739 --- a/folder1/g
1740 --- a/folder1/g
1740 +++ b/folder1/g
1741 +++ b/folder1/g
1741 @@ -2,3 +2,2 @@
1742 @@ -2,3 +2,2 @@
1742 2
1743 2
1743 -3
1744 -3
1744 4
1745 4
1745 @@ -6,2 +5,4 @@
1746 @@ -6,2 +5,4 @@
1746 6
1747 6
1747 +6.1
1748 +6.1
1748 +6.2
1749 +6.2
1749 7
1750 7
1750 @@ -8,1 +9,2 @@
1751 @@ -8,1 +9,2 @@
1751 8
1752 8
1752 +9
1753 +9
1753 """
1754 """
1754
1755
1755 class parser(object):
1756 class parser(object):
1756 """patch parsing state machine"""
1757 """patch parsing state machine"""
1757
1758
1758 def __init__(self):
1759 def __init__(self):
1759 self.fromline = 0
1760 self.fromline = 0
1760 self.toline = 0
1761 self.toline = 0
1761 self.proc = b''
1762 self.proc = b''
1762 self.header = None
1763 self.header = None
1763 self.context = []
1764 self.context = []
1764 self.before = []
1765 self.before = []
1765 self.hunk = []
1766 self.hunk = []
1766 self.headers = []
1767 self.headers = []
1767
1768
1768 def addrange(self, limits):
1769 def addrange(self, limits):
1769 self.addcontext([])
1770 self.addcontext([])
1770 fromstart, fromend, tostart, toend, proc = limits
1771 fromstart, fromend, tostart, toend, proc = limits
1771 self.fromline = int(fromstart)
1772 self.fromline = int(fromstart)
1772 self.toline = int(tostart)
1773 self.toline = int(tostart)
1773 self.proc = proc
1774 self.proc = proc
1774
1775
1775 def addcontext(self, context):
1776 def addcontext(self, context):
1776 if self.hunk:
1777 if self.hunk:
1777 h = recordhunk(
1778 h = recordhunk(
1778 self.header,
1779 self.header,
1779 self.fromline,
1780 self.fromline,
1780 self.toline,
1781 self.toline,
1781 self.proc,
1782 self.proc,
1782 self.before,
1783 self.before,
1783 self.hunk,
1784 self.hunk,
1784 context,
1785 context,
1785 maxcontext,
1786 maxcontext,
1786 )
1787 )
1787 self.header.hunks.append(h)
1788 self.header.hunks.append(h)
1788 self.fromline += len(self.before) + h.removed
1789 self.fromline += len(self.before) + h.removed
1789 self.toline += len(self.before) + h.added
1790 self.toline += len(self.before) + h.added
1790 self.before = []
1791 self.before = []
1791 self.hunk = []
1792 self.hunk = []
1792 self.context = context
1793 self.context = context
1793
1794
1794 def addhunk(self, hunk):
1795 def addhunk(self, hunk):
1795 if self.context:
1796 if self.context:
1796 self.before = self.context
1797 self.before = self.context
1797 self.context = []
1798 self.context = []
1798 if self.hunk:
1799 if self.hunk:
1799 self.addcontext([])
1800 self.addcontext([])
1800 self.hunk = hunk
1801 self.hunk = hunk
1801
1802
1802 def newfile(self, hdr):
1803 def newfile(self, hdr):
1803 self.addcontext([])
1804 self.addcontext([])
1804 h = header(hdr)
1805 h = header(hdr)
1805 self.headers.append(h)
1806 self.headers.append(h)
1806 self.header = h
1807 self.header = h
1807
1808
1808 def addother(self, line):
1809 def addother(self, line):
1809 pass # 'other' lines are ignored
1810 pass # 'other' lines are ignored
1810
1811
1811 def finished(self):
1812 def finished(self):
1812 self.addcontext([])
1813 self.addcontext([])
1813 return self.headers
1814 return self.headers
1814
1815
1815 transitions = {
1816 transitions = {
1816 b'file': {
1817 b'file': {
1817 b'context': addcontext,
1818 b'context': addcontext,
1818 b'file': newfile,
1819 b'file': newfile,
1819 b'hunk': addhunk,
1820 b'hunk': addhunk,
1820 b'range': addrange,
1821 b'range': addrange,
1821 },
1822 },
1822 b'context': {
1823 b'context': {
1823 b'file': newfile,
1824 b'file': newfile,
1824 b'hunk': addhunk,
1825 b'hunk': addhunk,
1825 b'range': addrange,
1826 b'range': addrange,
1826 b'other': addother,
1827 b'other': addother,
1827 },
1828 },
1828 b'hunk': {
1829 b'hunk': {
1829 b'context': addcontext,
1830 b'context': addcontext,
1830 b'file': newfile,
1831 b'file': newfile,
1831 b'range': addrange,
1832 b'range': addrange,
1832 },
1833 },
1833 b'range': {b'context': addcontext, b'hunk': addhunk},
1834 b'range': {b'context': addcontext, b'hunk': addhunk},
1834 b'other': {b'other': addother},
1835 b'other': {b'other': addother},
1835 }
1836 }
1836
1837
1837 p = parser()
1838 p = parser()
1838 fp = stringio()
1839 fp = stringio()
1839 fp.write(b''.join(originalchunks))
1840 fp.write(b''.join(originalchunks))
1840 fp.seek(0)
1841 fp.seek(0)
1841
1842
1842 state = b'context'
1843 state = b'context'
1843 for newstate, data in scanpatch(fp):
1844 for newstate, data in scanpatch(fp):
1844 try:
1845 try:
1845 p.transitions[state][newstate](p, data)
1846 p.transitions[state][newstate](p, data)
1846 except KeyError:
1847 except KeyError:
1847 raise PatchError(
1848 raise PatchError(
1848 b'unhandled transition: %s -> %s' % (state, newstate)
1849 b'unhandled transition: %s -> %s' % (state, newstate)
1849 )
1850 )
1850 state = newstate
1851 state = newstate
1851 del fp
1852 del fp
1852 return p.finished()
1853 return p.finished()
1853
1854
1854
1855
def pathtransform(path, strip, prefix):
    """turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    """
    pathlen = len(path)
    i = 0
    if strip == 0:
        return b'', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find(b'/', i)
        if i == -1:
            raise PatchError(
                _(b"unable to strip away %d of %d dirs from %s")
                % (count, strip, path)
            )
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i : i + 1] == b'/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
1894
1895
1895
1896
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta (target path plus ADD/DELETE op) for a plain hunk.

    Used for non-git patches, which carry no explicit metadata: the
    create/remove operation is inferred from /dev/null markers and the
    hunk's start/length, and the target filename is picked from the
    stripped a/b paths, preferring whichever side exists in the backend.

    Raises PatchError when neither source nor destination is usable.
    """
    nulla = afile_orig == b"/dev/null"
    nullb = bfile_orig == b"/dev/null"
    # a zero-length @@ -0,0 range on the null side marks creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[: afile.rfind(b'/') + 1]
    bbasedir = bfile[: bfile.rfind(b'/') + 1]
    if (
        missing
        and abasedir == bbasedir
        and afile.startswith(bfile)
        and hunk.starta == 0
        and hunk.lena == 0
    ):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = abase == bbase and bfile.startswith(afile)
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_(b"undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = b'ADD'
    elif remove:
        gp.op = b'DELETE'
    return gp
1955
1956
1956
1957
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, b''):
            if p(line):
                lines.append(line)
            else:
                # predicate failed: put the line back for the outer loop
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, b''):
        if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):

            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in (b'---', b'diff')

            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith(b'---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield b'file', header
        elif line.startswith(b' '):
            # context lines; '\' covers "\ No newline at end of file"
            cs = (b' ', b'\\')
            yield b'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith((b'-', b'+')):
            cs = (b'-', b'+', b'\\')
            yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield b'range', m.groups()
            else:
                yield b'other', line
2006
2007
2007
2008
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input: buffer the remainder so we can rewind
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from where it started
    fp.seek(pos)
    return gitpatches
2033
2034
2034
2035
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = b""
    bfile = b""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know whether this is a context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, b''):
        if state == BFILE and (
            (not context and x.startswith(b'@'))
            or (context is not False and x.startswith(b'***************'))
            or x.startswith(b'GIT binary patch')
        ):
            gp = None
            if gitpatches and gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
            if x.startswith(b'GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith(b'***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield b'file', (afile, bfile, h, gp and gp.copy() or None)
            yield b'hunk', h
        elif x.startswith(b'diff --git a/'):
            m = gitre.match(x.rstrip(b'\r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield b'git', [
                    g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
                ]
                gitpatches.reverse()
            afile = b'a/' + m.group(1)
            bfile = b'b/' + m.group(2)
            # flush metadata-only entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield b'file', (
                    b'a/' + gp.path,
                    b'b/' + gp.path,
                    None,
                    gp.copy(),
                )
            if not gitpatches:
                raise PatchError(
                    _(b'failed to synchronize metadata for "%s"') % afile[2:]
                )
            newfile = True
        elif x.startswith(b'---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith(b'+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith(b'***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith(b'---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith(b"***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining metadata-only git patches (e.g. mode changes)
    while gitpatches:
        gp = gitpatches.pop()
        yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2136
2137
2137
2138
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """

    def deltahead(binchunk):
        # length of the varint header: bytes with the high bit set continue
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i

    out = b""
    # skip the source-size and result-size varint headers
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i : i + 1])
        i += 1
        if cmd & 0x80:
            # copy command: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if cmd & 0x01:
                offset = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x02:
                offset |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x04:
                offset |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if cmd & 0x08:
                offset |= ord(binchunk[i : i + 1]) << 24
                i += 1
            if cmd & 0x10:
                size = ord(binchunk[i : i + 1])
                i += 1
            if cmd & 0x20:
                size |= ord(binchunk[i : i + 1]) << 8
                i += 1
            if cmd & 0x40:
                size |= ord(binchunk[i : i + 1]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert command: the next cmd bytes are literal data
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_(b'unexpected delta opcode 0'))
    return out
2195
2196
2196
2197
def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(
        ui,
        fp,
        patchfile,
        backend,
        store,
        strip=strip,
        prefix=prefix,
        eolmode=eolmode,
    )
2217
2218
2218
2219
2219 def _canonprefix(repo, prefix):
2220 def _canonprefix(repo, prefix):
2220 if prefix:
2221 if prefix:
2221 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2222 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2222 if prefix != b'':
2223 if prefix != b'':
2223 prefix += b'/'
2224 prefix += b'/'
2224 return prefix
2225 return prefix
2225
2226
2226
2227
def _applydiff(
    ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
):
    """Worker for applydiff(): drive iterhunks() events against a backend.

    'patcher' is the patchfile factory used per target file. Returns 0
    on success, -1 when any file was rejected, 1 when fuzz was needed.
    """
    prefix = _canonprefix(backend.repo, prefix)

    def pstrip(p):
        # strip-1 because git paths carry the extra a/ or b/ component
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == b'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == b'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(
                    backend, afile, bfile, first_hunk, strip, prefix
                )
            if gp.op == b'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: no content hunk follows
                if gp.op == b'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in (b'RENAME', b'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(
                            _(b"source file '%s' does not exist") % gp.oldpath
                        )
                if gp.mode:
                    mode = gp.mode
                    if gp.op == b'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = b''
                if data or mode:
                    if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
                        gp.path
                    ):
                        raise PatchError(
                            _(
                                b"cannot create %s: destination "
                                b"already exists"
                            )
                            % gp.path
                        )
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
            except PatchError as inst:
                ui.warn(stringutil.forcebytestr(inst) + b'\n')
                current_file = None
                rejects += 1
                continue
        elif state == b'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_(b'unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2320
2321
2321
2322
def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append(b'-d %s' % procutil.shellquote(cwd))
    cmd = b'%s %s -p%d < %s' % (
        patcher,
        b' '.join(args),
        strip,
        procutil.shellquote(patchname),
    )
    ui.debug(b'Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, b'rb')
    try:
        # scrape the external tool's output for touched files and fuzz
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + b'\n')
            if line.startswith(b'patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find(b'with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
            elif line.find(b'saving rejects to file') >= 0:
                ui.warn(line + b'\n')
            elif line.find(b'FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + b'\n')
                    printed_file = True
                ui.warn(line + b'\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(
            _(b"patch command failed: %s") % procutil.explainexit(code)
        )
    return fuzz
2369
2370
2370
2371
2371 def patchbackend(
2372 def patchbackend(
2372 ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
2373 ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
2373 ):
2374 ):
2374 if files is None:
2375 if files is None:
2375 files = set()
2376 files = set()
2376 if eolmode is None:
2377 if eolmode is None:
2377 eolmode = ui.config(b'patch', b'eol')
2378 eolmode = ui.config(b'patch', b'eol')
2378 if eolmode.lower() not in eolmodes:
2379 if eolmode.lower() not in eolmodes:
2379 raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
2380 raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
2380 eolmode = eolmode.lower()
2381 eolmode = eolmode.lower()
2381
2382
2382 store = filestore()
2383 store = filestore()
2383 try:
2384 try:
2384 fp = open(patchobj, b'rb')
2385 fp = open(patchobj, b'rb')
2385 except TypeError:
2386 except TypeError:
2386 fp = patchobj
2387 fp = patchobj
2387 try:
2388 try:
2388 ret = applydiff(
2389 ret = applydiff(
2389 ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
2390 ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
2390 )
2391 )
2391 finally:
2392 finally:
2392 if fp != patchobj:
2393 if fp != patchobj:
2393 fp.close()
2394 fp.close()
2394 files.update(backend.close())
2395 files.update(backend.close())
2395 store.close()
2396 store.close()
2396 if ret < 0:
2397 if ret < 0:
2397 raise PatchError(_(b'patch failed to apply'))
2398 raise PatchError(_(b'patch failed to apply'))
2398 return ret > 0
2399 return ret > 0
2399
2400
2400
2401
2401 def internalpatch(
2402 def internalpatch(
2402 ui,
2403 ui,
2403 repo,
2404 repo,
2404 patchobj,
2405 patchobj,
2405 strip,
2406 strip,
2406 prefix=b'',
2407 prefix=b'',
2407 files=None,
2408 files=None,
2408 eolmode=b'strict',
2409 eolmode=b'strict',
2409 similarity=0,
2410 similarity=0,
2410 ):
2411 ):
2411 """use builtin patch to apply <patchobj> to the working directory.
2412 """use builtin patch to apply <patchobj> to the working directory.
2412 returns whether patch was applied with fuzz factor."""
2413 returns whether patch was applied with fuzz factor."""
2413 backend = workingbackend(ui, repo, similarity)
2414 backend = workingbackend(ui, repo, similarity)
2414 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2415 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2415
2416
2416
2417
2417 def patchrepo(
2418 def patchrepo(
2418 ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
2419 ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
2419 ):
2420 ):
2420 backend = repobackend(ui, repo, ctx, store)
2421 backend = repobackend(ui, repo, ctx, store)
2421 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2422 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2422
2423
2423
2424
2424 def patch(
2425 def patch(
2425 ui,
2426 ui,
2426 repo,
2427 repo,
2427 patchname,
2428 patchname,
2428 strip=1,
2429 strip=1,
2429 prefix=b'',
2430 prefix=b'',
2430 files=None,
2431 files=None,
2431 eolmode=b'strict',
2432 eolmode=b'strict',
2432 similarity=0,
2433 similarity=0,
2433 ):
2434 ):
2434 """Apply <patchname> to the working directory.
2435 """Apply <patchname> to the working directory.
2435
2436
2436 'eolmode' specifies how end of lines should be handled. It can be:
2437 'eolmode' specifies how end of lines should be handled. It can be:
2437 - 'strict': inputs are read in binary mode, EOLs are preserved
2438 - 'strict': inputs are read in binary mode, EOLs are preserved
2438 - 'crlf': EOLs are ignored when patching and reset to CRLF
2439 - 'crlf': EOLs are ignored when patching and reset to CRLF
2439 - 'lf': EOLs are ignored when patching and reset to LF
2440 - 'lf': EOLs are ignored when patching and reset to LF
2440 - None: get it from user settings, default to 'strict'
2441 - None: get it from user settings, default to 'strict'
2441 'eolmode' is ignored when using an external patcher program.
2442 'eolmode' is ignored when using an external patcher program.
2442
2443
2443 Returns whether patch was applied with fuzz factor.
2444 Returns whether patch was applied with fuzz factor.
2444 """
2445 """
2445 patcher = ui.config(b'ui', b'patch')
2446 patcher = ui.config(b'ui', b'patch')
2446 if files is None:
2447 if files is None:
2447 files = set()
2448 files = set()
2448 if patcher:
2449 if patcher:
2449 return _externalpatch(
2450 return _externalpatch(
2450 ui, repo, patcher, patchname, strip, files, similarity
2451 ui, repo, patcher, patchname, strip, files, similarity
2451 )
2452 )
2452 return internalpatch(
2453 return internalpatch(
2453 ui, repo, patchname, strip, prefix, files, eolmode, similarity
2454 ui, repo, patchname, strip, prefix, files, eolmode, similarity
2454 )
2455 )
2455
2456
2456
2457
2457 def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
2458 def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
2458 backend = fsbackend(ui, repo.root)
2459 backend = fsbackend(ui, repo.root)
2459 prefix = _canonprefix(repo, prefix)
2460 prefix = _canonprefix(repo, prefix)
2460 with open(patchpath, b'rb') as fp:
2461 with open(patchpath, b'rb') as fp:
2461 changed = set()
2462 changed = set()
2462 for state, values in iterhunks(fp):
2463 for state, values in iterhunks(fp):
2463 if state == b'file':
2464 if state == b'file':
2464 afile, bfile, first_hunk, gp = values
2465 afile, bfile, first_hunk, gp = values
2465 if gp:
2466 if gp:
2466 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2467 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2467 if gp.oldpath:
2468 if gp.oldpath:
2468 gp.oldpath = pathtransform(
2469 gp.oldpath = pathtransform(
2469 gp.oldpath, strip - 1, prefix
2470 gp.oldpath, strip - 1, prefix
2470 )[1]
2471 )[1]
2471 else:
2472 else:
2472 gp = makepatchmeta(
2473 gp = makepatchmeta(
2473 backend, afile, bfile, first_hunk, strip, prefix
2474 backend, afile, bfile, first_hunk, strip, prefix
2474 )
2475 )
2475 changed.add(gp.path)
2476 changed.add(gp.path)
2476 if gp.op == b'RENAME':
2477 if gp.op == b'RENAME':
2477 changed.add(gp.oldpath)
2478 changed.add(gp.oldpath)
2478 elif state not in (b'hunk', b'git'):
2479 elif state not in (b'hunk', b'git'):
2479 raise error.Abort(_(b'unsupported parser state: %s') % state)
2480 raise error.Abort(_(b'unsupported parser state: %s') % state)
2480 return changed
2481 return changed
2481
2482
2482
2483
2483 class GitDiffRequired(Exception):
2484 class GitDiffRequired(Exception):
2484 pass
2485 pass
2485
2486
2486
2487
2487 diffopts = diffutil.diffallopts
2488 diffopts = diffutil.diffallopts
2488 diffallopts = diffutil.diffallopts
2489 diffallopts = diffutil.diffallopts
2489 difffeatureopts = diffutil.difffeatureopts
2490 difffeatureopts = diffutil.difffeatureopts
2490
2491
2491
2492
2492 def diff(
2493 def diff(
2493 repo,
2494 repo,
2494 node1=None,
2495 node1=None,
2495 node2=None,
2496 node2=None,
2496 match=None,
2497 match=None,
2497 changes=None,
2498 changes=None,
2498 opts=None,
2499 opts=None,
2499 losedatafn=None,
2500 losedatafn=None,
2500 pathfn=None,
2501 pathfn=None,
2501 copy=None,
2502 copy=None,
2502 copysourcematch=None,
2503 copysourcematch=None,
2503 hunksfilterfn=None,
2504 hunksfilterfn=None,
2504 ):
2505 ):
2505 """yields diff of changes to files between two nodes, or node and
2506 """yields diff of changes to files between two nodes, or node and
2506 working directory.
2507 working directory.
2507
2508
2508 if node1 is None, use first dirstate parent instead.
2509 if node1 is None, use first dirstate parent instead.
2509 if node2 is None, compare node1 with working directory.
2510 if node2 is None, compare node1 with working directory.
2510
2511
2511 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2512 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2512 every time some change cannot be represented with the current
2513 every time some change cannot be represented with the current
2513 patch format. Return False to upgrade to git patch format, True to
2514 patch format. Return False to upgrade to git patch format, True to
2514 accept the loss or raise an exception to abort the diff. It is
2515 accept the loss or raise an exception to abort the diff. It is
2515 called with the name of current file being diffed as 'fn'. If set
2516 called with the name of current file being diffed as 'fn'. If set
2516 to None, patches will always be upgraded to git format when
2517 to None, patches will always be upgraded to git format when
2517 necessary.
2518 necessary.
2518
2519
2519 prefix is a filename prefix that is prepended to all filenames on
2520 prefix is a filename prefix that is prepended to all filenames on
2520 display (used for subrepos).
2521 display (used for subrepos).
2521
2522
2522 relroot, if not empty, must be normalized with a trailing /. Any match
2523 relroot, if not empty, must be normalized with a trailing /. Any match
2523 patterns that fall outside it will be ignored.
2524 patterns that fall outside it will be ignored.
2524
2525
2525 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2526 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2526 information.
2527 information.
2527
2528
2528 if copysourcematch is not None, then copy sources will be filtered by this
2529 if copysourcematch is not None, then copy sources will be filtered by this
2529 matcher
2530 matcher
2530
2531
2531 hunksfilterfn, if not None, should be a function taking a filectx and
2532 hunksfilterfn, if not None, should be a function taking a filectx and
2532 hunks generator that may yield filtered hunks.
2533 hunks generator that may yield filtered hunks.
2533 """
2534 """
2534 if not node1 and not node2:
2535 if not node1 and not node2:
2535 node1 = repo.dirstate.p1()
2536 node1 = repo.dirstate.p1()
2536
2537
2537 ctx1 = repo[node1]
2538 ctx1 = repo[node1]
2538 ctx2 = repo[node2]
2539 ctx2 = repo[node2]
2539
2540
2540 for fctx1, fctx2, hdr, hunks in diffhunks(
2541 for fctx1, fctx2, hdr, hunks in diffhunks(
2541 repo,
2542 repo,
2542 ctx1=ctx1,
2543 ctx1=ctx1,
2543 ctx2=ctx2,
2544 ctx2=ctx2,
2544 match=match,
2545 match=match,
2545 changes=changes,
2546 changes=changes,
2546 opts=opts,
2547 opts=opts,
2547 losedatafn=losedatafn,
2548 losedatafn=losedatafn,
2548 pathfn=pathfn,
2549 pathfn=pathfn,
2549 copy=copy,
2550 copy=copy,
2550 copysourcematch=copysourcematch,
2551 copysourcematch=copysourcematch,
2551 ):
2552 ):
2552 if hunksfilterfn is not None:
2553 if hunksfilterfn is not None:
2553 # If the file has been removed, fctx2 is None; but this should
2554 # If the file has been removed, fctx2 is None; but this should
2554 # not occur here since we catch removed files early in
2555 # not occur here since we catch removed files early in
2555 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2556 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2556 assert (
2557 assert (
2557 fctx2 is not None
2558 fctx2 is not None
2558 ), b'fctx2 unexpectly None in diff hunks filtering'
2559 ), b'fctx2 unexpectly None in diff hunks filtering'
2559 hunks = hunksfilterfn(fctx2, hunks)
2560 hunks = hunksfilterfn(fctx2, hunks)
2560 text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
2561 text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
2561 if hdr and (text or len(hdr) > 1):
2562 if hdr and (text or len(hdr) > 1):
2562 yield b'\n'.join(hdr) + b'\n'
2563 yield b'\n'.join(hdr) + b'\n'
2563 if text:
2564 if text:
2564 yield text
2565 yield text
2565
2566
2566
2567
2567 def diffhunks(
2568 def diffhunks(
2568 repo,
2569 repo,
2569 ctx1,
2570 ctx1,
2570 ctx2,
2571 ctx2,
2571 match=None,
2572 match=None,
2572 changes=None,
2573 changes=None,
2573 opts=None,
2574 opts=None,
2574 losedatafn=None,
2575 losedatafn=None,
2575 pathfn=None,
2576 pathfn=None,
2576 copy=None,
2577 copy=None,
2577 copysourcematch=None,
2578 copysourcematch=None,
2578 ):
2579 ):
2579 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2580 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2580 where `header` is a list of diff headers and `hunks` is an iterable of
2581 where `header` is a list of diff headers and `hunks` is an iterable of
2581 (`hunkrange`, `hunklines`) tuples.
2582 (`hunkrange`, `hunklines`) tuples.
2582
2583
2583 See diff() for the meaning of parameters.
2584 See diff() for the meaning of parameters.
2584 """
2585 """
2585
2586
2586 if opts is None:
2587 if opts is None:
2587 opts = mdiff.defaultopts
2588 opts = mdiff.defaultopts
2588
2589
2589 def lrugetfilectx():
2590 def lrugetfilectx():
2590 cache = {}
2591 cache = {}
2591 order = collections.deque()
2592 order = collections.deque()
2592
2593
2593 def getfilectx(f, ctx):
2594 def getfilectx(f, ctx):
2594 fctx = ctx.filectx(f, filelog=cache.get(f))
2595 fctx = ctx.filectx(f, filelog=cache.get(f))
2595 if f not in cache:
2596 if f not in cache:
2596 if len(cache) > 20:
2597 if len(cache) > 20:
2597 del cache[order.popleft()]
2598 del cache[order.popleft()]
2598 cache[f] = fctx.filelog()
2599 cache[f] = fctx.filelog()
2599 else:
2600 else:
2600 order.remove(f)
2601 order.remove(f)
2601 order.append(f)
2602 order.append(f)
2602 return fctx
2603 return fctx
2603
2604
2604 return getfilectx
2605 return getfilectx
2605
2606
2606 getfilectx = lrugetfilectx()
2607 getfilectx = lrugetfilectx()
2607
2608
2608 if not changes:
2609 if not changes:
2609 changes = ctx1.status(ctx2, match=match)
2610 changes = ctx1.status(ctx2, match=match)
2610 if isinstance(changes, list):
2611 if isinstance(changes, list):
2611 modified, added, removed = changes[:3]
2612 modified, added, removed = changes[:3]
2612 else:
2613 else:
2613 modified, added, removed = (
2614 modified, added, removed = (
2614 changes.modified,
2615 changes.modified,
2615 changes.added,
2616 changes.added,
2616 changes.removed,
2617 changes.removed,
2617 )
2618 )
2618
2619
2619 if not modified and not added and not removed:
2620 if not modified and not added and not removed:
2620 return []
2621 return []
2621
2622
2622 if repo.ui.debugflag:
2623 if repo.ui.debugflag:
2623 hexfunc = hex
2624 hexfunc = hex
2624 else:
2625 else:
2625 hexfunc = short
2626 hexfunc = short
2626 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2627 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2627
2628
2628 if copy is None:
2629 if copy is None:
2629 copy = {}
2630 copy = {}
2630 if opts.git or opts.upgrade:
2631 if opts.git or opts.upgrade:
2631 copy = copies.pathcopies(ctx1, ctx2, match=match)
2632 copy = copies.pathcopies(ctx1, ctx2, match=match)
2632
2633
2633 if copysourcematch:
2634 if copysourcematch:
2634 # filter out copies where source side isn't inside the matcher
2635 # filter out copies where source side isn't inside the matcher
2635 # (copies.pathcopies() already filtered out the destination)
2636 # (copies.pathcopies() already filtered out the destination)
2636 copy = {
2637 copy = {
2637 dst: src
2638 dst: src
2638 for dst, src in pycompat.iteritems(copy)
2639 for dst, src in pycompat.iteritems(copy)
2639 if copysourcematch(src)
2640 if copysourcematch(src)
2640 }
2641 }
2641
2642
2642 modifiedset = set(modified)
2643 modifiedset = set(modified)
2643 addedset = set(added)
2644 addedset = set(added)
2644 removedset = set(removed)
2645 removedset = set(removed)
2645 for f in modified:
2646 for f in modified:
2646 if f not in ctx1:
2647 if f not in ctx1:
2647 # Fix up added, since merged-in additions appear as
2648 # Fix up added, since merged-in additions appear as
2648 # modifications during merges
2649 # modifications during merges
2649 modifiedset.remove(f)
2650 modifiedset.remove(f)
2650 addedset.add(f)
2651 addedset.add(f)
2651 for f in removed:
2652 for f in removed:
2652 if f not in ctx1:
2653 if f not in ctx1:
2653 # Merged-in additions that are then removed are reported as removed.
2654 # Merged-in additions that are then removed are reported as removed.
2654 # They are not in ctx1, so We don't want to show them in the diff.
2655 # They are not in ctx1, so We don't want to show them in the diff.
2655 removedset.remove(f)
2656 removedset.remove(f)
2656 modified = sorted(modifiedset)
2657 modified = sorted(modifiedset)
2657 added = sorted(addedset)
2658 added = sorted(addedset)
2658 removed = sorted(removedset)
2659 removed = sorted(removedset)
2659 for dst, src in list(copy.items()):
2660 for dst, src in list(copy.items()):
2660 if src not in ctx1:
2661 if src not in ctx1:
2661 # Files merged in during a merge and then copied/renamed are
2662 # Files merged in during a merge and then copied/renamed are
2662 # reported as copies. We want to show them in the diff as additions.
2663 # reported as copies. We want to show them in the diff as additions.
2663 del copy[dst]
2664 del copy[dst]
2664
2665
2665 prefetchmatch = scmutil.matchfiles(
2666 prefetchmatch = scmutil.matchfiles(
2666 repo, list(modifiedset | addedset | removedset)
2667 repo, list(modifiedset | addedset | removedset)
2667 )
2668 )
2668 revmatches = [
2669 revmatches = [
2669 (ctx1.rev(), prefetchmatch),
2670 (ctx1.rev(), prefetchmatch),
2670 (ctx2.rev(), prefetchmatch),
2671 (ctx2.rev(), prefetchmatch),
2671 ]
2672 ]
2672 scmutil.prefetchfiles(repo, revmatches)
2673 scmutil.prefetchfiles(repo, revmatches)
2673
2674
2674 def difffn(opts, losedata):
2675 def difffn(opts, losedata):
2675 return trydiff(
2676 return trydiff(
2676 repo,
2677 repo,
2677 revs,
2678 revs,
2678 ctx1,
2679 ctx1,
2679 ctx2,
2680 ctx2,
2680 modified,
2681 modified,
2681 added,
2682 added,
2682 removed,
2683 removed,
2683 copy,
2684 copy,
2684 getfilectx,
2685 getfilectx,
2685 opts,
2686 opts,
2686 losedata,
2687 losedata,
2687 pathfn,
2688 pathfn,
2688 )
2689 )
2689
2690
2690 if opts.upgrade and not opts.git:
2691 if opts.upgrade and not opts.git:
2691 try:
2692 try:
2692
2693
2693 def losedata(fn):
2694 def losedata(fn):
2694 if not losedatafn or not losedatafn(fn=fn):
2695 if not losedatafn or not losedatafn(fn=fn):
2695 raise GitDiffRequired
2696 raise GitDiffRequired
2696
2697
2697 # Buffer the whole output until we are sure it can be generated
2698 # Buffer the whole output until we are sure it can be generated
2698 return list(difffn(opts.copy(git=False), losedata))
2699 return list(difffn(opts.copy(git=False), losedata))
2699 except GitDiffRequired:
2700 except GitDiffRequired:
2700 return difffn(opts.copy(git=True), None)
2701 return difffn(opts.copy(git=True), None)
2701 else:
2702 else:
2702 return difffn(opts, None)
2703 return difffn(opts, None)
2703
2704
2704
2705
2705 def diffsinglehunk(hunklines):
2706 def diffsinglehunk(hunklines):
2706 """yield tokens for a list of lines in a single hunk"""
2707 """yield tokens for a list of lines in a single hunk"""
2707 for line in hunklines:
2708 for line in hunklines:
2708 # chomp
2709 # chomp
2709 chompline = line.rstrip(b'\r\n')
2710 chompline = line.rstrip(b'\r\n')
2710 # highlight tabs and trailing whitespace
2711 # highlight tabs and trailing whitespace
2711 stripline = chompline.rstrip()
2712 stripline = chompline.rstrip()
2712 if line.startswith(b'-'):
2713 if line.startswith(b'-'):
2713 label = b'diff.deleted'
2714 label = b'diff.deleted'
2714 elif line.startswith(b'+'):
2715 elif line.startswith(b'+'):
2715 label = b'diff.inserted'
2716 label = b'diff.inserted'
2716 else:
2717 else:
2717 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2718 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2718 for token in tabsplitter.findall(stripline):
2719 for token in tabsplitter.findall(stripline):
2719 if token.startswith(b'\t'):
2720 if token.startswith(b'\t'):
2720 yield (token, b'diff.tab')
2721 yield (token, b'diff.tab')
2721 else:
2722 else:
2722 yield (token, label)
2723 yield (token, label)
2723
2724
2724 if chompline != stripline:
2725 if chompline != stripline:
2725 yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
2726 yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
2726 if chompline != line:
2727 if chompline != line:
2727 yield (line[len(chompline) :], b'')
2728 yield (line[len(chompline) :], b'')
2728
2729
2729
2730
2730 def diffsinglehunkinline(hunklines):
2731 def diffsinglehunkinline(hunklines):
2731 """yield tokens for a list of lines in a single hunk, with inline colors"""
2732 """yield tokens for a list of lines in a single hunk, with inline colors"""
2732 # prepare deleted, and inserted content
2733 # prepare deleted, and inserted content
2733 a = bytearray()
2734 a = bytearray()
2734 b = bytearray()
2735 b = bytearray()
2735 for line in hunklines:
2736 for line in hunklines:
2736 if line[0:1] == b'-':
2737 if line[0:1] == b'-':
2737 a += line[1:]
2738 a += line[1:]
2738 elif line[0:1] == b'+':
2739 elif line[0:1] == b'+':
2739 b += line[1:]
2740 b += line[1:]
2740 else:
2741 else:
2741 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2742 raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2742 # fast path: if either side is empty, use diffsinglehunk
2743 # fast path: if either side is empty, use diffsinglehunk
2743 if not a or not b:
2744 if not a or not b:
2744 for t in diffsinglehunk(hunklines):
2745 for t in diffsinglehunk(hunklines):
2745 yield t
2746 yield t
2746 return
2747 return
2747 # re-split the content into words
2748 # re-split the content into words
2748 al = wordsplitter.findall(bytes(a))
2749 al = wordsplitter.findall(bytes(a))
2749 bl = wordsplitter.findall(bytes(b))
2750 bl = wordsplitter.findall(bytes(b))
2750 # re-arrange the words to lines since the diff algorithm is line-based
2751 # re-arrange the words to lines since the diff algorithm is line-based
2751 aln = [s if s == b'\n' else s + b'\n' for s in al]
2752 aln = [s if s == b'\n' else s + b'\n' for s in al]
2752 bln = [s if s == b'\n' else s + b'\n' for s in bl]
2753 bln = [s if s == b'\n' else s + b'\n' for s in bl]
2753 an = b''.join(aln)
2754 an = b''.join(aln)
2754 bn = b''.join(bln)
2755 bn = b''.join(bln)
2755 # run the diff algorithm, prepare atokens and btokens
2756 # run the diff algorithm, prepare atokens and btokens
2756 atokens = []
2757 atokens = []
2757 btokens = []
2758 btokens = []
2758 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2759 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2759 for (a1, a2, b1, b2), btype in blocks:
2760 for (a1, a2, b1, b2), btype in blocks:
2760 changed = btype == b'!'
2761 changed = btype == b'!'
2761 for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
2762 for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
2762 atokens.append((changed, token))
2763 atokens.append((changed, token))
2763 for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
2764 for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
2764 btokens.append((changed, token))
2765 btokens.append((changed, token))
2765
2766
2766 # yield deleted tokens, then inserted ones
2767 # yield deleted tokens, then inserted ones
2767 for prefix, label, tokens in [
2768 for prefix, label, tokens in [
2768 (b'-', b'diff.deleted', atokens),
2769 (b'-', b'diff.deleted', atokens),
2769 (b'+', b'diff.inserted', btokens),
2770 (b'+', b'diff.inserted', btokens),
2770 ]:
2771 ]:
2771 nextisnewline = True
2772 nextisnewline = True
2772 for changed, token in tokens:
2773 for changed, token in tokens:
2773 if nextisnewline:
2774 if nextisnewline:
2774 yield (prefix, label)
2775 yield (prefix, label)
2775 nextisnewline = False
2776 nextisnewline = False
2776 # special handling line end
2777 # special handling line end
2777 isendofline = token.endswith(b'\n')
2778 isendofline = token.endswith(b'\n')
2778 if isendofline:
2779 if isendofline:
2779 chomp = token[:-1] # chomp
2780 chomp = token[:-1] # chomp
2780 if chomp.endswith(b'\r'):
2781 if chomp.endswith(b'\r'):
2781 chomp = chomp[:-1]
2782 chomp = chomp[:-1]
2782 endofline = token[len(chomp) :]
2783 endofline = token[len(chomp) :]
2783 token = chomp.rstrip() # detect spaces at the end
2784 token = chomp.rstrip() # detect spaces at the end
2784 endspaces = chomp[len(token) :]
2785 endspaces = chomp[len(token) :]
2785 # scan tabs
2786 # scan tabs
2786 for maybetab in tabsplitter.findall(token):
2787 for maybetab in tabsplitter.findall(token):
2787 if b'\t' == maybetab[0:1]:
2788 if b'\t' == maybetab[0:1]:
2788 currentlabel = b'diff.tab'
2789 currentlabel = b'diff.tab'
2789 else:
2790 else:
2790 if changed:
2791 if changed:
2791 currentlabel = label + b'.changed'
2792 currentlabel = label + b'.changed'
2792 else:
2793 else:
2793 currentlabel = label + b'.unchanged'
2794 currentlabel = label + b'.unchanged'
2794 yield (maybetab, currentlabel)
2795 yield (maybetab, currentlabel)
2795 if isendofline:
2796 if isendofline:
2796 if endspaces:
2797 if endspaces:
2797 yield (endspaces, b'diff.trailingwhitespace')
2798 yield (endspaces, b'diff.trailingwhitespace')
2798 yield (endofline, b'')
2799 yield (endofline, b'')
2799 nextisnewline = True
2800 nextisnewline = True
2800
2801
2801
2802
2802 def difflabel(func, *args, **kw):
2803 def difflabel(func, *args, **kw):
2803 '''yields 2-tuples of (output, label) based on the output of func()'''
2804 '''yields 2-tuples of (output, label) based on the output of func()'''
2804 if kw.get('opts') and kw['opts'].worddiff:
2805 if kw.get('opts') and kw['opts'].worddiff:
2805 dodiffhunk = diffsinglehunkinline
2806 dodiffhunk = diffsinglehunkinline
2806 else:
2807 else:
2807 dodiffhunk = diffsinglehunk
2808 dodiffhunk = diffsinglehunk
2808 headprefixes = [
2809 headprefixes = [
2809 (b'diff', b'diff.diffline'),
2810 (b'diff', b'diff.diffline'),
2810 (b'copy', b'diff.extended'),
2811 (b'copy', b'diff.extended'),
2811 (b'rename', b'diff.extended'),
2812 (b'rename', b'diff.extended'),
2812 (b'old', b'diff.extended'),
2813 (b'old', b'diff.extended'),
2813 (b'new', b'diff.extended'),
2814 (b'new', b'diff.extended'),
2814 (b'deleted', b'diff.extended'),
2815 (b'deleted', b'diff.extended'),
2815 (b'index', b'diff.extended'),
2816 (b'index', b'diff.extended'),
2816 (b'similarity', b'diff.extended'),
2817 (b'similarity', b'diff.extended'),
2817 (b'---', b'diff.file_a'),
2818 (b'---', b'diff.file_a'),
2818 (b'+++', b'diff.file_b'),
2819 (b'+++', b'diff.file_b'),
2819 ]
2820 ]
2820 textprefixes = [
2821 textprefixes = [
2821 (b'@', b'diff.hunk'),
2822 (b'@', b'diff.hunk'),
2822 # - and + are handled by diffsinglehunk
2823 # - and + are handled by diffsinglehunk
2823 ]
2824 ]
2824 head = False
2825 head = False
2825
2826
2826 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2827 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2827 hunkbuffer = []
2828 hunkbuffer = []
2828
2829
2829 def consumehunkbuffer():
2830 def consumehunkbuffer():
2830 if hunkbuffer:
2831 if hunkbuffer:
2831 for token in dodiffhunk(hunkbuffer):
2832 for token in dodiffhunk(hunkbuffer):
2832 yield token
2833 yield token
2833 hunkbuffer[:] = []
2834 hunkbuffer[:] = []
2834
2835
2835 for chunk in func(*args, **kw):
2836 for chunk in func(*args, **kw):
2836 lines = chunk.split(b'\n')
2837 lines = chunk.split(b'\n')
2837 linecount = len(lines)
2838 linecount = len(lines)
2838 for i, line in enumerate(lines):
2839 for i, line in enumerate(lines):
2839 if head:
2840 if head:
2840 if line.startswith(b'@'):
2841 if line.startswith(b'@'):
2841 head = False
2842 head = False
2842 else:
2843 else:
2843 if line and not line.startswith(
2844 if line and not line.startswith(
2844 (b' ', b'+', b'-', b'@', b'\\')
2845 (b' ', b'+', b'-', b'@', b'\\')
2845 ):
2846 ):
2846 head = True
2847 head = True
2847 diffline = False
2848 diffline = False
2848 if not head and line and line.startswith((b'+', b'-')):
2849 if not head and line and line.startswith((b'+', b'-')):
2849 diffline = True
2850 diffline = True
2850
2851
2851 prefixes = textprefixes
2852 prefixes = textprefixes
2852 if head:
2853 if head:
2853 prefixes = headprefixes
2854 prefixes = headprefixes
2854 if diffline:
2855 if diffline:
2855 # buffered
2856 # buffered
2856 bufferedline = line
2857 bufferedline = line
2857 if i + 1 < linecount:
2858 if i + 1 < linecount:
2858 bufferedline += b"\n"
2859 bufferedline += b"\n"
2859 hunkbuffer.append(bufferedline)
2860 hunkbuffer.append(bufferedline)
2860 else:
2861 else:
2861 # unbuffered
2862 # unbuffered
2862 for token in consumehunkbuffer():
2863 for token in consumehunkbuffer():
2863 yield token
2864 yield token
2864 stripline = line.rstrip()
2865 stripline = line.rstrip()
2865 for prefix, label in prefixes:
2866 for prefix, label in prefixes:
2866 if stripline.startswith(prefix):
2867 if stripline.startswith(prefix):
2867 yield (stripline, label)
2868 yield (stripline, label)
2868 if line != stripline:
2869 if line != stripline:
2869 yield (
2870 yield (
2870 line[len(stripline) :],
2871 line[len(stripline) :],
2871 b'diff.trailingwhitespace',
2872 b'diff.trailingwhitespace',
2872 )
2873 )
2873 break
2874 break
2874 else:
2875 else:
2875 yield (line, b'')
2876 yield (line, b'')
2876 if i + 1 < linecount:
2877 if i + 1 < linecount:
2877 yield (b'\n', b'')
2878 yield (b'\n', b'')
2878 for token in consumehunkbuffer():
2879 for token in consumehunkbuffer():
2879 yield token
2880 yield token
2880
2881
2881
2882
2882 def diffui(*args, **kw):
2883 def diffui(*args, **kw):
2883 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2884 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2884 return difflabel(diff, *args, **kw)
2885 return difflabel(diff, *args, **kw)
2885
2886
2886
2887
2887 def _filepairs(modified, added, removed, copy, opts):
2888 def _filepairs(modified, added, removed, copy, opts):
2888 """generates tuples (f1, f2, copyop), where f1 is the name of the file
2889 """generates tuples (f1, f2, copyop), where f1 is the name of the file
2889 before and f2 is the the name after. For added files, f1 will be None,
2890 before and f2 is the the name after. For added files, f1 will be None,
2890 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2891 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2891 or 'rename' (the latter two only if opts.git is set)."""
2892 or 'rename' (the latter two only if opts.git is set)."""
2892 gone = set()
2893 gone = set()
2893
2894
2894 copyto = {v: k for k, v in copy.items()}
2895 copyto = {v: k for k, v in copy.items()}
2895
2896
2896 addedset, removedset = set(added), set(removed)
2897 addedset, removedset = set(added), set(removed)
2897
2898
2898 for f in sorted(modified + added + removed):
2899 for f in sorted(modified + added + removed):
2899 copyop = None
2900 copyop = None
2900 f1, f2 = f, f
2901 f1, f2 = f, f
2901 if f in addedset:
2902 if f in addedset:
2902 f1 = None
2903 f1 = None
2903 if f in copy:
2904 if f in copy:
2904 if opts.git:
2905 if opts.git:
2905 f1 = copy[f]
2906 f1 = copy[f]
2906 if f1 in removedset and f1 not in gone:
2907 if f1 in removedset and f1 not in gone:
2907 copyop = b'rename'
2908 copyop = b'rename'
2908 gone.add(f1)
2909 gone.add(f1)
2909 else:
2910 else:
2910 copyop = b'copy'
2911 copyop = b'copy'
2911 elif f in removedset:
2912 elif f in removedset:
2912 f2 = None
2913 f2 = None
2913 if opts.git:
2914 if opts.git:
2914 # have we already reported a copy above?
2915 # have we already reported a copy above?
2915 if (
2916 if (
2916 f in copyto
2917 f in copyto
2917 and copyto[f] in addedset
2918 and copyto[f] in addedset
2918 and copy[copyto[f]] == f
2919 and copy[copyto[f]] == f
2919 ):
2920 ):
2920 continue
2921 continue
2921 yield f1, f2, copyop
2922 yield f1, f2, copyop
2922
2923
2923
2924
2924 def _gitindex(text):
2925 def _gitindex(text):
2925 if not text:
2926 if not text:
2926 text = b""
2927 text = b""
2927 l = len(text)
2928 l = len(text)
2928 s = hashutil.sha1(b'blob %d\0' % l)
2929 s = hashutil.sha1(b'blob %d\0' % l)
2929 s.update(text)
2930 s.update(text)
2930 return hex(s.digest())
2931 return hex(s.digest())
2931
2932
2932
2933
2933 _gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2934 _gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2934
2935
2935
2936
2936 def trydiff(
2937 def trydiff(
2937 repo,
2938 repo,
2938 revs,
2939 revs,
2939 ctx1,
2940 ctx1,
2940 ctx2,
2941 ctx2,
2941 modified,
2942 modified,
2942 added,
2943 added,
2943 removed,
2944 removed,
2944 copy,
2945 copy,
2945 getfilectx,
2946 getfilectx,
2946 opts,
2947 opts,
2947 losedatafn,
2948 losedatafn,
2948 pathfn,
2949 pathfn,
2949 ):
2950 ):
2950 """given input data, generate a diff and yield it in blocks
2951 """given input data, generate a diff and yield it in blocks
2951
2952
2952 If generating a diff would lose data like flags or binary data and
2953 If generating a diff would lose data like flags or binary data and
2953 losedatafn is not None, it will be called.
2954 losedatafn is not None, it will be called.
2954
2955
2955 pathfn is applied to every path in the diff output.
2956 pathfn is applied to every path in the diff output.
2956 """
2957 """
2957
2958
2958 if opts.noprefix:
2959 if opts.noprefix:
2959 aprefix = bprefix = b''
2960 aprefix = bprefix = b''
2960 else:
2961 else:
2961 aprefix = b'a/'
2962 aprefix = b'a/'
2962 bprefix = b'b/'
2963 bprefix = b'b/'
2963
2964
2964 def diffline(f, revs):
2965 def diffline(f, revs):
2965 revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2966 revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2966 return b'diff %s %s' % (revinfo, f)
2967 return b'diff %s %s' % (revinfo, f)
2967
2968
2968 def isempty(fctx):
2969 def isempty(fctx):
2969 return fctx is None or fctx.size() == 0
2970 return fctx is None or fctx.size() == 0
2970
2971
2971 date1 = dateutil.datestr(ctx1.date())
2972 date1 = dateutil.datestr(ctx1.date())
2972 date2 = dateutil.datestr(ctx2.date())
2973 date2 = dateutil.datestr(ctx2.date())
2973
2974
2974 if not pathfn:
2975 if not pathfn:
2975 pathfn = lambda f: f
2976 pathfn = lambda f: f
2976
2977
2977 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2978 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2978 content1 = None
2979 content1 = None
2979 content2 = None
2980 content2 = None
2980 fctx1 = None
2981 fctx1 = None
2981 fctx2 = None
2982 fctx2 = None
2982 flag1 = None
2983 flag1 = None
2983 flag2 = None
2984 flag2 = None
2984 if f1:
2985 if f1:
2985 fctx1 = getfilectx(f1, ctx1)
2986 fctx1 = getfilectx(f1, ctx1)
2986 if opts.git or losedatafn:
2987 if opts.git or losedatafn:
2987 flag1 = ctx1.flags(f1)
2988 flag1 = ctx1.flags(f1)
2988 if f2:
2989 if f2:
2989 fctx2 = getfilectx(f2, ctx2)
2990 fctx2 = getfilectx(f2, ctx2)
2990 if opts.git or losedatafn:
2991 if opts.git or losedatafn:
2991 flag2 = ctx2.flags(f2)
2992 flag2 = ctx2.flags(f2)
2992 # if binary is True, output "summary" or "base85", but not "text diff"
2993 # if binary is True, output "summary" or "base85", but not "text diff"
2993 if opts.text:
2994 if opts.text:
2994 binary = False
2995 binary = False
2995 else:
2996 else:
2996 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2997 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2997
2998
2998 if losedatafn and not opts.git:
2999 if losedatafn and not opts.git:
2999 if (
3000 if (
3000 binary
3001 binary
3001 or
3002 or
3002 # copy/rename
3003 # copy/rename
3003 f2 in copy
3004 f2 in copy
3004 or
3005 or
3005 # empty file creation
3006 # empty file creation
3006 (not f1 and isempty(fctx2))
3007 (not f1 and isempty(fctx2))
3007 or
3008 or
3008 # empty file deletion
3009 # empty file deletion
3009 (isempty(fctx1) and not f2)
3010 (isempty(fctx1) and not f2)
3010 or
3011 or
3011 # create with flags
3012 # create with flags
3012 (not f1 and flag2)
3013 (not f1 and flag2)
3013 or
3014 or
3014 # change flags
3015 # change flags
3015 (f1 and f2 and flag1 != flag2)
3016 (f1 and f2 and flag1 != flag2)
3016 ):
3017 ):
3017 losedatafn(f2 or f1)
3018 losedatafn(f2 or f1)
3018
3019
3019 path1 = pathfn(f1 or f2)
3020 path1 = pathfn(f1 or f2)
3020 path2 = pathfn(f2 or f1)
3021 path2 = pathfn(f2 or f1)
3021 header = []
3022 header = []
3022 if opts.git:
3023 if opts.git:
3023 header.append(
3024 header.append(
3024 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3025 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3025 )
3026 )
3026 if not f1: # added
3027 if not f1: # added
3027 header.append(b'new file mode %s' % _gitmode[flag2])
3028 header.append(b'new file mode %s' % _gitmode[flag2])
3028 elif not f2: # removed
3029 elif not f2: # removed
3029 header.append(b'deleted file mode %s' % _gitmode[flag1])
3030 header.append(b'deleted file mode %s' % _gitmode[flag1])
3030 else: # modified/copied/renamed
3031 else: # modified/copied/renamed
3031 mode1, mode2 = _gitmode[flag1], _gitmode[flag2]
3032 mode1, mode2 = _gitmode[flag1], _gitmode[flag2]
3032 if mode1 != mode2:
3033 if mode1 != mode2:
3033 header.append(b'old mode %s' % mode1)
3034 header.append(b'old mode %s' % mode1)
3034 header.append(b'new mode %s' % mode2)
3035 header.append(b'new mode %s' % mode2)
3035 if copyop is not None:
3036 if copyop is not None:
3036 if opts.showsimilarity:
3037 if opts.showsimilarity:
3037 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3038 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3038 header.append(b'similarity index %d%%' % sim)
3039 header.append(b'similarity index %d%%' % sim)
3039 header.append(b'%s from %s' % (copyop, path1))
3040 header.append(b'%s from %s' % (copyop, path1))
3040 header.append(b'%s to %s' % (copyop, path2))
3041 header.append(b'%s to %s' % (copyop, path2))
3041 elif revs:
3042 elif revs:
3042 header.append(diffline(path1, revs))
3043 header.append(diffline(path1, revs))
3043
3044
3044 # fctx.is | diffopts | what to | is fctx.data()
3045 # fctx.is | diffopts | what to | is fctx.data()
3045 # binary() | text nobinary git index | output? | outputted?
3046 # binary() | text nobinary git index | output? | outputted?
3046 # ------------------------------------|----------------------------
3047 # ------------------------------------|----------------------------
3047 # yes | no no no * | summary | no
3048 # yes | no no no * | summary | no
3048 # yes | no no yes * | base85 | yes
3049 # yes | no no yes * | base85 | yes
3049 # yes | no yes no * | summary | no
3050 # yes | no yes no * | summary | no
3050 # yes | no yes yes 0 | summary | no
3051 # yes | no yes yes 0 | summary | no
3051 # yes | no yes yes >0 | summary | semi [1]
3052 # yes | no yes yes >0 | summary | semi [1]
3052 # yes | yes * * * | text diff | yes
3053 # yes | yes * * * | text diff | yes
3053 # no | * * * * | text diff | yes
3054 # no | * * * * | text diff | yes
3054 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3055 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3055 if binary and (
3056 if binary and (
3056 not opts.git or (opts.git and opts.nobinary and not opts.index)
3057 not opts.git or (opts.git and opts.nobinary and not opts.index)
3057 ):
3058 ):
3058 # fast path: no binary content will be displayed, content1 and
3059 # fast path: no binary content will be displayed, content1 and
3059 # content2 are only used for equivalent test. cmp() could have a
3060 # content2 are only used for equivalent test. cmp() could have a
3060 # fast path.
3061 # fast path.
3061 if fctx1 is not None:
3062 if fctx1 is not None:
3062 content1 = b'\0'
3063 content1 = b'\0'
3063 if fctx2 is not None:
3064 if fctx2 is not None:
3064 if fctx1 is not None and not fctx1.cmp(fctx2):
3065 if fctx1 is not None and not fctx1.cmp(fctx2):
3065 content2 = b'\0' # not different
3066 content2 = b'\0' # not different
3066 else:
3067 else:
3067 content2 = b'\0\0'
3068 content2 = b'\0\0'
3068 else:
3069 else:
3069 # normal path: load contents
3070 # normal path: load contents
3070 if fctx1 is not None:
3071 if fctx1 is not None:
3071 content1 = fctx1.data()
3072 content1 = fctx1.data()
3072 if fctx2 is not None:
3073 if fctx2 is not None:
3073 content2 = fctx2.data()
3074 content2 = fctx2.data()
3074
3075
3075 data1 = (ctx1, fctx1, path1, flag1, content1, date1)
3076 data1 = (ctx1, fctx1, path1, flag1, content1, date1)
3076 data2 = (ctx2, fctx2, path2, flag2, content2, date2)
3077 data2 = (ctx2, fctx2, path2, flag2, content2, date2)
3077 yield diffcontent(data1, data2, header, binary, opts)
3078 yield diffcontent(data1, data2, header, binary, opts)
3078
3079
3079
3080
3080 def diffcontent(data1, data2, header, binary, opts):
3081 def diffcontent(data1, data2, header, binary, opts):
3081 """diffs two versions of a file.
3082 """diffs two versions of a file.
3082
3083
3083 data1 and data2 are tuples containg:
3084 data1 and data2 are tuples containg:
3084
3085
3085 * ctx: changeset for the file
3086 * ctx: changeset for the file
3086 * fctx: file context for that file
3087 * fctx: file context for that file
3087 * path1: name of the file
3088 * path1: name of the file
3088 * flag: flags of the file
3089 * flag: flags of the file
3089 * content: full content of the file (can be null in case of binary)
3090 * content: full content of the file (can be null in case of binary)
3090 * date: date of the changeset
3091 * date: date of the changeset
3091
3092
3092 header: the patch header
3093 header: the patch header
3093 binary: whether the any of the version of file is binary or not
3094 binary: whether the any of the version of file is binary or not
3094 opts: user passed options
3095 opts: user passed options
3095
3096
3096 It exists as a separate function so that extensions like extdiff can wrap
3097 It exists as a separate function so that extensions like extdiff can wrap
3097 it and use the file content directly.
3098 it and use the file content directly.
3098 """
3099 """
3099
3100
3100 ctx1, fctx1, path1, flag1, content1, date1 = data1
3101 ctx1, fctx1, path1, flag1, content1, date1 = data1
3101 ctx2, fctx2, path2, flag2, content2, date2 = data2
3102 ctx2, fctx2, path2, flag2, content2, date2 = data2
3103 index1 = _gitindex(content1) if path1 in ctx1 else nullhex
3104 index2 = _gitindex(content2) if path2 in ctx2 else nullhex
3102 if binary and opts.git and not opts.nobinary:
3105 if binary and opts.git and not opts.nobinary:
3103 text = mdiff.b85diff(content1, content2)
3106 text = mdiff.b85diff(content1, content2)
3104 if text:
3107 if text:
3105 header.append(
3108 header.append(
3106 b'index %s..%s' % (_gitindex(content1), _gitindex(content2))
3109 b'index %s..%s' % (index1, index2)
3107 )
3110 )
3108 hunks = ((None, [text]),)
3111 hunks = ((None, [text]),)
3109 else:
3112 else:
3110 if opts.git and opts.index > 0:
3113 if opts.git and opts.index > 0:
3111 flag = flag1
3114 flag = flag1
3112 if flag is None:
3115 if flag is None:
3113 flag = flag2
3116 flag = flag2
3114 header.append(
3117 header.append(
3115 b'index %s..%s %s'
3118 b'index %s..%s %s'
3116 % (
3119 % (
3117 _gitindex(content1)[0 : opts.index],
3120 index1[0 : opts.index],
3118 _gitindex(content2)[0 : opts.index],
3121 index2[0 : opts.index],
3119 _gitmode[flag],
3122 _gitmode[flag],
3120 )
3123 )
3121 )
3124 )
3122
3125
3123 uheaders, hunks = mdiff.unidiff(
3126 uheaders, hunks = mdiff.unidiff(
3124 content1,
3127 content1,
3125 date1,
3128 date1,
3126 content2,
3129 content2,
3127 date2,
3130 date2,
3128 path1,
3131 path1,
3129 path2,
3132 path2,
3130 binary=binary,
3133 binary=binary,
3131 opts=opts,
3134 opts=opts,
3132 )
3135 )
3133 header.extend(uheaders)
3136 header.extend(uheaders)
3134 return fctx1, fctx2, header, hunks
3137 return fctx1, fctx2, header, hunks
3135
3138
3136
3139
3137 def diffstatsum(stats):
3140 def diffstatsum(stats):
3138 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
3141 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
3139 for f, a, r, b in stats:
3142 for f, a, r, b in stats:
3140 maxfile = max(maxfile, encoding.colwidth(f))
3143 maxfile = max(maxfile, encoding.colwidth(f))
3141 maxtotal = max(maxtotal, a + r)
3144 maxtotal = max(maxtotal, a + r)
3142 addtotal += a
3145 addtotal += a
3143 removetotal += r
3146 removetotal += r
3144 binary = binary or b
3147 binary = binary or b
3145
3148
3146 return maxfile, maxtotal, addtotal, removetotal, binary
3149 return maxfile, maxtotal, addtotal, removetotal, binary
3147
3150
3148
3151
3149 def diffstatdata(lines):
3152 def diffstatdata(lines):
3150 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
3153 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
3151
3154
3152 results = []
3155 results = []
3153 filename, adds, removes, isbinary = None, 0, 0, False
3156 filename, adds, removes, isbinary = None, 0, 0, False
3154
3157
3155 def addresult():
3158 def addresult():
3156 if filename:
3159 if filename:
3157 results.append((filename, adds, removes, isbinary))
3160 results.append((filename, adds, removes, isbinary))
3158
3161
3159 # inheader is used to track if a line is in the
3162 # inheader is used to track if a line is in the
3160 # header portion of the diff. This helps properly account
3163 # header portion of the diff. This helps properly account
3161 # for lines that start with '--' or '++'
3164 # for lines that start with '--' or '++'
3162 inheader = False
3165 inheader = False
3163
3166
3164 for line in lines:
3167 for line in lines:
3165 if line.startswith(b'diff'):
3168 if line.startswith(b'diff'):
3166 addresult()
3169 addresult()
3167 # starting a new file diff
3170 # starting a new file diff
3168 # set numbers to 0 and reset inheader
3171 # set numbers to 0 and reset inheader
3169 inheader = True
3172 inheader = True
3170 adds, removes, isbinary = 0, 0, False
3173 adds, removes, isbinary = 0, 0, False
3171 if line.startswith(b'diff --git a/'):
3174 if line.startswith(b'diff --git a/'):
3172 filename = gitre.search(line).group(2)
3175 filename = gitre.search(line).group(2)
3173 elif line.startswith(b'diff -r'):
3176 elif line.startswith(b'diff -r'):
3174 # format: "diff -r ... -r ... filename"
3177 # format: "diff -r ... -r ... filename"
3175 filename = diffre.search(line).group(1)
3178 filename = diffre.search(line).group(1)
3176 elif line.startswith(b'@@'):
3179 elif line.startswith(b'@@'):
3177 inheader = False
3180 inheader = False
3178 elif line.startswith(b'+') and not inheader:
3181 elif line.startswith(b'+') and not inheader:
3179 adds += 1
3182 adds += 1
3180 elif line.startswith(b'-') and not inheader:
3183 elif line.startswith(b'-') and not inheader:
3181 removes += 1
3184 removes += 1
3182 elif line.startswith(b'GIT binary patch') or line.startswith(
3185 elif line.startswith(b'GIT binary patch') or line.startswith(
3183 b'Binary file'
3186 b'Binary file'
3184 ):
3187 ):
3185 isbinary = True
3188 isbinary = True
3186 elif line.startswith(b'rename from'):
3189 elif line.startswith(b'rename from'):
3187 filename = line[12:]
3190 filename = line[12:]
3188 elif line.startswith(b'rename to'):
3191 elif line.startswith(b'rename to'):
3189 filename += b' => %s' % line[10:]
3192 filename += b' => %s' % line[10:]
3190 addresult()
3193 addresult()
3191 return results
3194 return results
3192
3195
3193
3196
3194 def diffstat(lines, width=80):
3197 def diffstat(lines, width=80):
3195 output = []
3198 output = []
3196 stats = diffstatdata(lines)
3199 stats = diffstatdata(lines)
3197 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
3200 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
3198
3201
3199 countwidth = len(str(maxtotal))
3202 countwidth = len(str(maxtotal))
3200 if hasbinary and countwidth < 3:
3203 if hasbinary and countwidth < 3:
3201 countwidth = 3
3204 countwidth = 3
3202 graphwidth = width - countwidth - maxname - 6
3205 graphwidth = width - countwidth - maxname - 6
3203 if graphwidth < 10:
3206 if graphwidth < 10:
3204 graphwidth = 10
3207 graphwidth = 10
3205
3208
3206 def scale(i):
3209 def scale(i):
3207 if maxtotal <= graphwidth:
3210 if maxtotal <= graphwidth:
3208 return i
3211 return i
3209 # If diffstat runs out of room it doesn't print anything,
3212 # If diffstat runs out of room it doesn't print anything,
3210 # which isn't very useful, so always print at least one + or -
3213 # which isn't very useful, so always print at least one + or -
3211 # if there were at least some changes.
3214 # if there were at least some changes.
3212 return max(i * graphwidth // maxtotal, int(bool(i)))
3215 return max(i * graphwidth // maxtotal, int(bool(i)))
3213
3216
3214 for filename, adds, removes, isbinary in stats:
3217 for filename, adds, removes, isbinary in stats:
3215 if isbinary:
3218 if isbinary:
3216 count = b'Bin'
3219 count = b'Bin'
3217 else:
3220 else:
3218 count = b'%d' % (adds + removes)
3221 count = b'%d' % (adds + removes)
3219 pluses = b'+' * scale(adds)
3222 pluses = b'+' * scale(adds)
3220 minuses = b'-' * scale(removes)
3223 minuses = b'-' * scale(removes)
3221 output.append(
3224 output.append(
3222 b' %s%s | %*s %s%s\n'
3225 b' %s%s | %*s %s%s\n'
3223 % (
3226 % (
3224 filename,
3227 filename,
3225 b' ' * (maxname - encoding.colwidth(filename)),
3228 b' ' * (maxname - encoding.colwidth(filename)),
3226 countwidth,
3229 countwidth,
3227 count,
3230 count,
3228 pluses,
3231 pluses,
3229 minuses,
3232 minuses,
3230 )
3233 )
3231 )
3234 )
3232
3235
3233 if stats:
3236 if stats:
3234 output.append(
3237 output.append(
3235 _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
3238 _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
3236 % (len(stats), totaladds, totalremoves)
3239 % (len(stats), totaladds, totalremoves)
3237 )
3240 )
3238
3241
3239 return b''.join(output)
3242 return b''.join(output)
3240
3243
3241
3244
3242 def diffstatui(*args, **kw):
3245 def diffstatui(*args, **kw):
3243 """like diffstat(), but yields 2-tuples of (output, label) for
3246 """like diffstat(), but yields 2-tuples of (output, label) for
3244 ui.write()
3247 ui.write()
3245 """
3248 """
3246
3249
3247 for line in diffstat(*args, **kw).splitlines():
3250 for line in diffstat(*args, **kw).splitlines():
3248 if line and line[-1] in b'+-':
3251 if line and line[-1] in b'+-':
3249 name, graph = line.rsplit(b' ', 1)
3252 name, graph = line.rsplit(b' ', 1)
3250 yield (name + b' ', b'')
3253 yield (name + b' ', b'')
3251 m = re.search(br'\++', graph)
3254 m = re.search(br'\++', graph)
3252 if m:
3255 if m:
3253 yield (m.group(0), b'diffstat.inserted')
3256 yield (m.group(0), b'diffstat.inserted')
3254 m = re.search(br'-+', graph)
3257 m = re.search(br'-+', graph)
3255 if m:
3258 if m:
3256 yield (m.group(0), b'diffstat.deleted')
3259 yield (m.group(0), b'diffstat.deleted')
3257 else:
3260 else:
3258 yield (line, b'')
3261 yield (line, b'')
3259 yield (b'\n', b'')
3262 yield (b'\n', b'')
@@ -1,481 +1,481 b''
1 $ hg init repo
1 $ hg init repo
2 $ cd repo
2 $ cd repo
3 $ cat > a <<EOF
3 $ cat > a <<EOF
4 > c
4 > c
5 > c
5 > c
6 > a
6 > a
7 > a
7 > a
8 > b
8 > b
9 > a
9 > a
10 > a
10 > a
11 > c
11 > c
12 > c
12 > c
13 > EOF
13 > EOF
14 $ hg ci -Am adda
14 $ hg ci -Am adda
15 adding a
15 adding a
16
16
17 $ cat > a <<EOF
17 $ cat > a <<EOF
18 > c
18 > c
19 > c
19 > c
20 > a
20 > a
21 > a
21 > a
22 > dd
22 > dd
23 > a
23 > a
24 > a
24 > a
25 > c
25 > c
26 > c
26 > c
27 > EOF
27 > EOF
28
28
29 default context
29 default context
30
30
31 $ hg diff --nodates
31 $ hg diff --nodates
32 diff -r cf9f4ba66af2 a
32 diff -r cf9f4ba66af2 a
33 --- a/a
33 --- a/a
34 +++ b/a
34 +++ b/a
35 @@ -2,7 +2,7 @@
35 @@ -2,7 +2,7 @@
36 c
36 c
37 a
37 a
38 a
38 a
39 -b
39 -b
40 +dd
40 +dd
41 a
41 a
42 a
42 a
43 c
43 c
44
44
45 invalid --unified
45 invalid --unified
46
46
47 $ hg diff --nodates -U foo
47 $ hg diff --nodates -U foo
48 abort: diff context lines count must be an integer, not 'foo'
48 abort: diff context lines count must be an integer, not 'foo'
49 [255]
49 [255]
50
50
51
51
52 $ hg diff --nodates -U 2
52 $ hg diff --nodates -U 2
53 diff -r cf9f4ba66af2 a
53 diff -r cf9f4ba66af2 a
54 --- a/a
54 --- a/a
55 +++ b/a
55 +++ b/a
56 @@ -3,5 +3,5 @@
56 @@ -3,5 +3,5 @@
57 a
57 a
58 a
58 a
59 -b
59 -b
60 +dd
60 +dd
61 a
61 a
62 a
62 a
63
63
64 $ hg --config diff.unified=2 diff --nodates
64 $ hg --config diff.unified=2 diff --nodates
65 diff -r cf9f4ba66af2 a
65 diff -r cf9f4ba66af2 a
66 --- a/a
66 --- a/a
67 +++ b/a
67 +++ b/a
68 @@ -3,5 +3,5 @@
68 @@ -3,5 +3,5 @@
69 a
69 a
70 a
70 a
71 -b
71 -b
72 +dd
72 +dd
73 a
73 a
74 a
74 a
75
75
76 $ hg diff --nodates -U 1
76 $ hg diff --nodates -U 1
77 diff -r cf9f4ba66af2 a
77 diff -r cf9f4ba66af2 a
78 --- a/a
78 --- a/a
79 +++ b/a
79 +++ b/a
80 @@ -4,3 +4,3 @@
80 @@ -4,3 +4,3 @@
81 a
81 a
82 -b
82 -b
83 +dd
83 +dd
84 a
84 a
85
85
86 invalid diff.unified
86 invalid diff.unified
87
87
88 $ hg --config diff.unified=foo diff --nodates
88 $ hg --config diff.unified=foo diff --nodates
89 abort: diff context lines count must be an integer, not 'foo'
89 abort: diff context lines count must be an integer, not 'foo'
90 [255]
90 [255]
91
91
92 noprefix config and option
92 noprefix config and option
93
93
94 $ hg --config diff.noprefix=True diff --nodates
94 $ hg --config diff.noprefix=True diff --nodates
95 diff -r cf9f4ba66af2 a
95 diff -r cf9f4ba66af2 a
96 --- a
96 --- a
97 +++ a
97 +++ a
98 @@ -2,7 +2,7 @@
98 @@ -2,7 +2,7 @@
99 c
99 c
100 a
100 a
101 a
101 a
102 -b
102 -b
103 +dd
103 +dd
104 a
104 a
105 a
105 a
106 c
106 c
107 $ hg diff --noprefix --nodates
107 $ hg diff --noprefix --nodates
108 diff -r cf9f4ba66af2 a
108 diff -r cf9f4ba66af2 a
109 --- a
109 --- a
110 +++ a
110 +++ a
111 @@ -2,7 +2,7 @@
111 @@ -2,7 +2,7 @@
112 c
112 c
113 a
113 a
114 a
114 a
115 -b
115 -b
116 +dd
116 +dd
117 a
117 a
118 a
118 a
119 c
119 c
120
120
121 noprefix config disabled in plain mode, but option still enabled
121 noprefix config disabled in plain mode, but option still enabled
122
122
123 $ HGPLAIN=1 hg --config diff.noprefix=True diff --nodates
123 $ HGPLAIN=1 hg --config diff.noprefix=True diff --nodates
124 diff -r cf9f4ba66af2 a
124 diff -r cf9f4ba66af2 a
125 --- a/a
125 --- a/a
126 +++ b/a
126 +++ b/a
127 @@ -2,7 +2,7 @@
127 @@ -2,7 +2,7 @@
128 c
128 c
129 a
129 a
130 a
130 a
131 -b
131 -b
132 +dd
132 +dd
133 a
133 a
134 a
134 a
135 c
135 c
136 $ HGPLAIN=1 hg diff --noprefix --nodates
136 $ HGPLAIN=1 hg diff --noprefix --nodates
137 diff -r cf9f4ba66af2 a
137 diff -r cf9f4ba66af2 a
138 --- a
138 --- a
139 +++ a
139 +++ a
140 @@ -2,7 +2,7 @@
140 @@ -2,7 +2,7 @@
141 c
141 c
142 a
142 a
143 a
143 a
144 -b
144 -b
145 +dd
145 +dd
146 a
146 a
147 a
147 a
148 c
148 c
149
149
150 $ cd ..
150 $ cd ..
151
151
152
152
153 0 lines of context hunk header matches gnu diff hunk header
153 0 lines of context hunk header matches gnu diff hunk header
154
154
155 $ hg init diffzero
155 $ hg init diffzero
156 $ cd diffzero
156 $ cd diffzero
157 $ cat > f1 << EOF
157 $ cat > f1 << EOF
158 > c2
158 > c2
159 > c4
159 > c4
160 > c5
160 > c5
161 > EOF
161 > EOF
162 $ hg commit -Am0
162 $ hg commit -Am0
163 adding f1
163 adding f1
164
164
165 $ cat > f2 << EOF
165 $ cat > f2 << EOF
166 > c1
166 > c1
167 > c2
167 > c2
168 > c3
168 > c3
169 > c4
169 > c4
170 > EOF
170 > EOF
171 $ mv f2 f1
171 $ mv f2 f1
172 $ hg diff -U0 --nodates
172 $ hg diff -U0 --nodates
173 diff -r 55d8ff78db23 f1
173 diff -r 55d8ff78db23 f1
174 --- a/f1
174 --- a/f1
175 +++ b/f1
175 +++ b/f1
176 @@ -0,0 +1,1 @@
176 @@ -0,0 +1,1 @@
177 +c1
177 +c1
178 @@ -1,0 +3,1 @@
178 @@ -1,0 +3,1 @@
179 +c3
179 +c3
180 @@ -3,1 +4,0 @@
180 @@ -3,1 +4,0 @@
181 -c5
181 -c5
182
182
183 $ hg diff -U0 --nodates --git
183 $ hg diff -U0 --nodates --git
184 diff --git a/f1 b/f1
184 diff --git a/f1 b/f1
185 --- a/f1
185 --- a/f1
186 +++ b/f1
186 +++ b/f1
187 @@ -0,0 +1,1 @@
187 @@ -0,0 +1,1 @@
188 +c1
188 +c1
189 @@ -1,0 +3,1 @@
189 @@ -1,0 +3,1 @@
190 +c3
190 +c3
191 @@ -3,1 +4,0 @@
191 @@ -3,1 +4,0 @@
192 -c5
192 -c5
193
193
194 $ hg diff -U0 --nodates -p
194 $ hg diff -U0 --nodates -p
195 diff -r 55d8ff78db23 f1
195 diff -r 55d8ff78db23 f1
196 --- a/f1
196 --- a/f1
197 +++ b/f1
197 +++ b/f1
198 @@ -0,0 +1,1 @@
198 @@ -0,0 +1,1 @@
199 +c1
199 +c1
200 @@ -1,0 +3,1 @@ c2
200 @@ -1,0 +3,1 @@ c2
201 +c3
201 +c3
202 @@ -3,1 +4,0 @@ c4
202 @@ -3,1 +4,0 @@ c4
203 -c5
203 -c5
204
204
205 $ echo a > f1
205 $ echo a > f1
206 $ hg ci -m movef2
206 $ hg ci -m movef2
207
207
208 Test diff headers terminating with TAB when necessary (issue3357)
208 Test diff headers terminating with TAB when necessary (issue3357)
209 Regular diff --nodates, file creation
209 Regular diff --nodates, file creation
210
210
211 $ hg mv f1 'f 1'
211 $ hg mv f1 'f 1'
212 $ echo b > 'f 1'
212 $ echo b > 'f 1'
213 $ hg diff --nodates 'f 1'
213 $ hg diff --nodates 'f 1'
214 diff -r 7574207d0d15 f 1
214 diff -r 7574207d0d15 f 1
215 --- /dev/null
215 --- /dev/null
216 +++ b/f 1
216 +++ b/f 1
217 @@ -0,0 +1,1 @@
217 @@ -0,0 +1,1 @@
218 +b
218 +b
219
219
220 Git diff, adding space
220 Git diff, adding space
221
221
222 $ hg diff --git
222 $ hg diff --git
223 diff --git a/f1 b/f 1
223 diff --git a/f1 b/f 1
224 rename from f1
224 rename from f1
225 rename to f 1
225 rename to f 1
226 --- a/f1
226 --- a/f1
227 +++ b/f 1
227 +++ b/f 1
228 @@ -1,1 +1,1 @@
228 @@ -1,1 +1,1 @@
229 -a
229 -a
230 +b
230 +b
231
231
232 Git diff, adding extended headers
232 Git diff, adding extended headers
233
233
234 $ hg diff --git --config experimental.extendedheader.index=7 --config experimental.extendedheader.similarity=True
234 $ hg diff --git --config experimental.extendedheader.index=7 --config experimental.extendedheader.similarity=True
235 diff --git a/f1 b/f 1
235 diff --git a/f1 b/f 1
236 similarity index 0%
236 similarity index 0%
237 rename from f1
237 rename from f1
238 rename to f 1
238 rename to f 1
239 index 7898192..6178079 100644
239 index 7898192..6178079 100644
240 --- a/f1
240 --- a/f1
241 +++ b/f 1
241 +++ b/f 1
242 @@ -1,1 +1,1 @@
242 @@ -1,1 +1,1 @@
243 -a
243 -a
244 +b
244 +b
245
245
246 $ hg diff --git --config experimental.extendedheader.index=-1
246 $ hg diff --git --config experimental.extendedheader.index=-1
247 invalid length for extendedheader.index: '-1'
247 invalid length for extendedheader.index: '-1'
248 diff --git a/f1 b/f 1
248 diff --git a/f1 b/f 1
249 rename from f1
249 rename from f1
250 rename to f 1
250 rename to f 1
251 --- a/f1
251 --- a/f1
252 +++ b/f 1
252 +++ b/f 1
253 @@ -1,1 +1,1 @@
253 @@ -1,1 +1,1 @@
254 -a
254 -a
255 +b
255 +b
256
256
257 $ hg diff --git --config experimental.extendedheader.index=whatever
257 $ hg diff --git --config experimental.extendedheader.index=whatever
258 invalid value for extendedheader.index: 'whatever'
258 invalid value for extendedheader.index: 'whatever'
259 diff --git a/f1 b/f 1
259 diff --git a/f1 b/f 1
260 rename from f1
260 rename from f1
261 rename to f 1
261 rename to f 1
262 --- a/f1
262 --- a/f1
263 +++ b/f 1
263 +++ b/f 1
264 @@ -1,1 +1,1 @@
264 @@ -1,1 +1,1 @@
265 -a
265 -a
266 +b
266 +b
267
267
268 Git diff with noprefix
268 Git diff with noprefix
269
269
270 $ hg --config diff.noprefix=True diff --git --nodates
270 $ hg --config diff.noprefix=True diff --git --nodates
271 diff --git f1 f 1
271 diff --git f1 f 1
272 rename from f1
272 rename from f1
273 rename to f 1
273 rename to f 1
274 --- f1
274 --- f1
275 +++ f 1
275 +++ f 1
276 @@ -1,1 +1,1 @@
276 @@ -1,1 +1,1 @@
277 -a
277 -a
278 +b
278 +b
279
279
280 noprefix config disabled in plain mode, but option still enabled
280 noprefix config disabled in plain mode, but option still enabled
281
281
282 $ HGPLAIN=1 hg --config diff.noprefix=True diff --git --nodates
282 $ HGPLAIN=1 hg --config diff.noprefix=True diff --git --nodates
283 diff --git a/f1 b/f 1
283 diff --git a/f1 b/f 1
284 rename from f1
284 rename from f1
285 rename to f 1
285 rename to f 1
286 --- a/f1
286 --- a/f1
287 +++ b/f 1
287 +++ b/f 1
288 @@ -1,1 +1,1 @@
288 @@ -1,1 +1,1 @@
289 -a
289 -a
290 +b
290 +b
291 $ HGPLAIN=1 hg diff --git --noprefix --nodates
291 $ HGPLAIN=1 hg diff --git --noprefix --nodates
292 diff --git f1 f 1
292 diff --git f1 f 1
293 rename from f1
293 rename from f1
294 rename to f 1
294 rename to f 1
295 --- f1
295 --- f1
296 +++ f 1
296 +++ f 1
297 @@ -1,1 +1,1 @@
297 @@ -1,1 +1,1 @@
298 -a
298 -a
299 +b
299 +b
300
300
301 Regular diff --nodates, file deletion
301 Regular diff --nodates, file deletion
302
302
303 $ hg ci -m addspace
303 $ hg ci -m addspace
304 $ hg mv 'f 1' f1
304 $ hg mv 'f 1' f1
305 $ echo a > f1
305 $ echo a > f1
306 $ hg diff --nodates 'f 1'
306 $ hg diff --nodates 'f 1'
307 diff -r ca50fe67c9c7 f 1
307 diff -r ca50fe67c9c7 f 1
308 --- a/f 1
308 --- a/f 1
309 +++ /dev/null
309 +++ /dev/null
310 @@ -1,1 +0,0 @@
310 @@ -1,1 +0,0 @@
311 -b
311 -b
312
312
313 Git diff, removing space
313 Git diff, removing space
314
314
315 $ hg diff --git
315 $ hg diff --git
316 diff --git a/f 1 b/f1
316 diff --git a/f 1 b/f1
317 rename from f 1
317 rename from f 1
318 rename to f1
318 rename to f1
319 --- a/f 1
319 --- a/f 1
320 +++ b/f1
320 +++ b/f1
321 @@ -1,1 +1,1 @@
321 @@ -1,1 +1,1 @@
322 -b
322 -b
323 +a
323 +a
324
324
325 showfunc diff
325 showfunc diff
326 $ cat > f1 << EOF
326 $ cat > f1 << EOF
327 > int main() {
327 > int main() {
328 > int a = 0;
328 > int a = 0;
329 > int b = 1;
329 > int b = 1;
330 > int c = 2;
330 > int c = 2;
331 > int d = 3;
331 > int d = 3;
332 > return a + b + c + d;
332 > return a + b + c + d;
333 > }
333 > }
334 > EOF
334 > EOF
335 $ hg commit -m addfunction
335 $ hg commit -m addfunction
336 $ cat > f1 << EOF
336 $ cat > f1 << EOF
337 > int main() {
337 > int main() {
338 > int a = 0;
338 > int a = 0;
339 > int b = 1;
339 > int b = 1;
340 > int c = 2;
340 > int c = 2;
341 > int e = 3;
341 > int e = 3;
342 > return a + b + c + e;
342 > return a + b + c + e;
343 > }
343 > }
344 > EOF
344 > EOF
345 $ hg diff --git
345 $ hg diff --git
346 diff --git a/f1 b/f1
346 diff --git a/f1 b/f1
347 --- a/f1
347 --- a/f1
348 +++ b/f1
348 +++ b/f1
349 @@ -2,6 +2,6 @@
349 @@ -2,6 +2,6 @@
350 int a = 0;
350 int a = 0;
351 int b = 1;
351 int b = 1;
352 int c = 2;
352 int c = 2;
353 - int d = 3;
353 - int d = 3;
354 - return a + b + c + d;
354 - return a + b + c + d;
355 + int e = 3;
355 + int e = 3;
356 + return a + b + c + e;
356 + return a + b + c + e;
357 }
357 }
358 $ hg diff --config diff.showfunc=True --git
358 $ hg diff --config diff.showfunc=True --git
359 diff --git a/f1 b/f1
359 diff --git a/f1 b/f1
360 --- a/f1
360 --- a/f1
361 +++ b/f1
361 +++ b/f1
362 @@ -2,6 +2,6 @@ int main() {
362 @@ -2,6 +2,6 @@ int main() {
363 int a = 0;
363 int a = 0;
364 int b = 1;
364 int b = 1;
365 int c = 2;
365 int c = 2;
366 - int d = 3;
366 - int d = 3;
367 - return a + b + c + d;
367 - return a + b + c + d;
368 + int e = 3;
368 + int e = 3;
369 + return a + b + c + e;
369 + return a + b + c + e;
370 }
370 }
371
371
372 If [diff] git is set to true, but the user says --no-git, we should
372 If [diff] git is set to true, but the user says --no-git, we should
373 *not* get git diffs
373 *not* get git diffs
374 $ hg diff --nodates --config diff.git=1 --no-git
374 $ hg diff --nodates --config diff.git=1 --no-git
375 diff -r f2c7c817fa55 f1
375 diff -r f2c7c817fa55 f1
376 --- a/f1
376 --- a/f1
377 +++ b/f1
377 +++ b/f1
378 @@ -2,6 +2,6 @@
378 @@ -2,6 +2,6 @@
379 int a = 0;
379 int a = 0;
380 int b = 1;
380 int b = 1;
381 int c = 2;
381 int c = 2;
382 - int d = 3;
382 - int d = 3;
383 - return a + b + c + d;
383 - return a + b + c + d;
384 + int e = 3;
384 + int e = 3;
385 + return a + b + c + e;
385 + return a + b + c + e;
386 }
386 }
387
387
388 $ cd ..
388 $ cd ..
389
389
390 Long function names should be abbreviated, but multi-byte character shouldn't
390 Long function names should be abbreviated, but multi-byte character shouldn't
391 be broken up
391 be broken up
392
392
393 $ hg init longfunc
393 $ hg init longfunc
394 $ cd longfunc
394 $ cd longfunc
395
395
396 >>> with open('a', 'wb') as f:
396 >>> with open('a', 'wb') as f:
397 ... f.write(b'a' * 39 + b'bb' + b'\n') and None
397 ... f.write(b'a' * 39 + b'bb' + b'\n') and None
398 ... f.write(b' .\n' * 3) and None
398 ... f.write(b' .\n' * 3) and None
399 ... f.write(b' 0 b\n') and None
399 ... f.write(b' 0 b\n') and None
400 ... f.write(b' .\n' * 3) and None
400 ... f.write(b' .\n' * 3) and None
401 ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') and None
401 ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') and None
402 ... f.write(b' .\n' * 3) and None
402 ... f.write(b' .\n' * 3) and None
403 ... f.write(b' 0 a with grave (single code point)\n') and None
403 ... f.write(b' 0 a with grave (single code point)\n') and None
404 ... f.write(b' .\n' * 3) and None
404 ... f.write(b' .\n' * 3) and None
405 ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') and None
405 ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') and None
406 ... f.write(b' .\n' * 3) and None
406 ... f.write(b' .\n' * 3) and None
407 ... f.write(b' 0 a with grave (composition)\n') and None
407 ... f.write(b' 0 a with grave (composition)\n') and None
408 ... f.write(b' .\n' * 3) and None
408 ... f.write(b' .\n' * 3) and None
409 $ hg ci -qAm0
409 $ hg ci -qAm0
410
410
411 >>> with open('a', 'wb') as f:
411 >>> with open('a', 'wb') as f:
412 ... f.write(b'a' * 39 + b'bb' + b'\n') and None
412 ... f.write(b'a' * 39 + b'bb' + b'\n') and None
413 ... f.write(b' .\n' * 3) and None
413 ... f.write(b' .\n' * 3) and None
414 ... f.write(b' 1 b\n') and None
414 ... f.write(b' 1 b\n') and None
415 ... f.write(b' .\n' * 3) and None
415 ... f.write(b' .\n' * 3) and None
416 ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') and None
416 ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') and None
417 ... f.write(b' .\n' * 3) and None
417 ... f.write(b' .\n' * 3) and None
418 ... f.write(b' 1 a with grave (single code point)\n') and None
418 ... f.write(b' 1 a with grave (single code point)\n') and None
419 ... f.write(b' .\n' * 3) and None
419 ... f.write(b' .\n' * 3) and None
420 ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') and None
420 ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') and None
421 ... f.write(b' .\n' * 3) and None
421 ... f.write(b' .\n' * 3) and None
422 ... f.write(b' 1 a with grave (composition)\n') and None
422 ... f.write(b' 1 a with grave (composition)\n') and None
423 ... f.write(b' .\n' * 3) and None
423 ... f.write(b' .\n' * 3) and None
424 $ hg ci -m1
424 $ hg ci -m1
425
425
426 $ hg diff -c1 --nodates --show-function
426 $ hg diff -c1 --nodates --show-function
427 diff -r 3e92dd6fa812 -r a256341606cb a
427 diff -r 3e92dd6fa812 -r a256341606cb a
428 --- a/a
428 --- a/a
429 +++ b/a
429 +++ b/a
430 @@ -2,7 +2,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab
430 @@ -2,7 +2,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab
431 .
431 .
432 .
432 .
433 .
433 .
434 - 0 b
434 - 0 b
435 + 1 b
435 + 1 b
436 .
436 .
437 .
437 .
438 .
438 .
439 @@ -10,7 +10,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xc3\xa0 (esc)
439 @@ -10,7 +10,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xc3\xa0 (esc)
440 .
440 .
441 .
441 .
442 .
442 .
443 - 0 a with grave (single code point)
443 - 0 a with grave (single code point)
444 + 1 a with grave (single code point)
444 + 1 a with grave (single code point)
445 .
445 .
446 .
446 .
447 .
447 .
448 @@ -18,7 +18,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xcc\x80 (esc)
448 @@ -18,7 +18,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xcc\x80 (esc)
449 .
449 .
450 .
450 .
451 .
451 .
452 - 0 a with grave (composition)
452 - 0 a with grave (composition)
453 + 1 a with grave (composition)
453 + 1 a with grave (composition)
454 .
454 .
455 .
455 .
456 .
456 .
457
457
458 $ cd ..
458 $ cd ..
459
459
460 Make sure `hg diff --git` differentiate "file did not exists" and "file is empty"
460 Make sure `hg diff --git` differentiate "file did not exists" and "file is empty"
461 for git blob oids
461 for git blob oids
462
462
463 $ hg init bloboids
463 $ hg init bloboids
464 $ cd bloboids
464 $ cd bloboids
465
465
466 $ touch a
466 $ touch a
467 $ hg ci -Am "empty a"
467 $ hg ci -Am "empty a"
468 adding a
468 adding a
469 $ hg diff -c 0 --git --config experimental.extendedheader.index=full | grep index
469 $ hg diff -c 0 --git --config experimental.extendedheader.index=full | grep index
470 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
470 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
471
471
472 Make sure `hg diff --git` differentiate "file was empty" and "file is removed"
472 Make sure `hg diff --git` differentiate "file was empty" and "file is removed"
473 for git blob oids
473 for git blob oids
474
474
475 $ rm a
475 $ rm a
476 $ hg ci -Am "removed a"
476 $ hg ci -Am "removed a"
477 removing a
477 removing a
478 $ hg diff -c 1 --git --config experimental.extendedheader.index=full | grep index
478 $ hg diff -c 1 --git --config experimental.extendedheader.index=full | grep index
479 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
479 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 100644
480
480
481 $ cd ..
481 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now