# Provenance: Mercurial repository, changeset r34379:86f3c976 (default branch)
# "patch: remove superfluous pass statements" — Augie Fackler
# (hosting-site template residue trimmed; diff header retained below)
@@ -1,2795 +1,2794 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
43 diffhelpers = policy.importmod(r'diffhelpers')
43 diffhelpers = policy.importmod(r'diffhelpers')
44 stringio = util.stringio
44 stringio = util.stringio
45
45
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
49 PatchError = error.PatchError
49 PatchError = error.PatchError
50
50
51 # public functions
51 # public functions
52
52
53 def split(stream):
53 def split(stream):
54 '''return an iterator of individual patches from a stream'''
54 '''return an iterator of individual patches from a stream'''
55 def isheader(line, inheader):
55 def isheader(line, inheader):
56 if inheader and line[0] in (' ', '\t'):
56 if inheader and line[0] in (' ', '\t'):
57 # continuation
57 # continuation
58 return True
58 return True
59 if line[0] in (' ', '-', '+'):
59 if line[0] in (' ', '-', '+'):
60 # diff line - don't check for header pattern in there
60 # diff line - don't check for header pattern in there
61 return False
61 return False
62 l = line.split(': ', 1)
62 l = line.split(': ', 1)
63 return len(l) == 2 and ' ' not in l[0]
63 return len(l) == 2 and ' ' not in l[0]
64
64
65 def chunk(lines):
65 def chunk(lines):
66 return stringio(''.join(lines))
66 return stringio(''.join(lines))
67
67
68 def hgsplit(stream, cur):
68 def hgsplit(stream, cur):
69 inheader = True
69 inheader = True
70
70
71 for line in stream:
71 for line in stream:
72 if not line.strip():
72 if not line.strip():
73 inheader = False
73 inheader = False
74 if not inheader and line.startswith('# HG changeset patch'):
74 if not inheader and line.startswith('# HG changeset patch'):
75 yield chunk(cur)
75 yield chunk(cur)
76 cur = []
76 cur = []
77 inheader = True
77 inheader = True
78
78
79 cur.append(line)
79 cur.append(line)
80
80
81 if cur:
81 if cur:
82 yield chunk(cur)
82 yield chunk(cur)
83
83
84 def mboxsplit(stream, cur):
84 def mboxsplit(stream, cur):
85 for line in stream:
85 for line in stream:
86 if line.startswith('From '):
86 if line.startswith('From '):
87 for c in split(chunk(cur[1:])):
87 for c in split(chunk(cur[1:])):
88 yield c
88 yield c
89 cur = []
89 cur = []
90
90
91 cur.append(line)
91 cur.append(line)
92
92
93 if cur:
93 if cur:
94 for c in split(chunk(cur[1:])):
94 for c in split(chunk(cur[1:])):
95 yield c
95 yield c
96
96
97 def mimesplit(stream, cur):
97 def mimesplit(stream, cur):
98 def msgfp(m):
98 def msgfp(m):
99 fp = stringio()
99 fp = stringio()
100 g = email.Generator.Generator(fp, mangle_from_=False)
100 g = email.Generator.Generator(fp, mangle_from_=False)
101 g.flatten(m)
101 g.flatten(m)
102 fp.seek(0)
102 fp.seek(0)
103 return fp
103 return fp
104
104
105 for line in stream:
105 for line in stream:
106 cur.append(line)
106 cur.append(line)
107 c = chunk(cur)
107 c = chunk(cur)
108
108
109 m = email.Parser.Parser().parse(c)
109 m = email.Parser.Parser().parse(c)
110 if not m.is_multipart():
110 if not m.is_multipart():
111 yield msgfp(m)
111 yield msgfp(m)
112 else:
112 else:
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 for part in m.walk():
114 for part in m.walk():
115 ct = part.get_content_type()
115 ct = part.get_content_type()
116 if ct not in ok_types:
116 if ct not in ok_types:
117 continue
117 continue
118 yield msgfp(part)
118 yield msgfp(part)
119
119
120 def headersplit(stream, cur):
120 def headersplit(stream, cur):
121 inheader = False
121 inheader = False
122
122
123 for line in stream:
123 for line in stream:
124 if not inheader and isheader(line, inheader):
124 if not inheader and isheader(line, inheader):
125 yield chunk(cur)
125 yield chunk(cur)
126 cur = []
126 cur = []
127 inheader = True
127 inheader = True
128 if inheader and not isheader(line, inheader):
128 if inheader and not isheader(line, inheader):
129 inheader = False
129 inheader = False
130
130
131 cur.append(line)
131 cur.append(line)
132
132
133 if cur:
133 if cur:
134 yield chunk(cur)
134 yield chunk(cur)
135
135
136 def remainder(cur):
136 def remainder(cur):
137 yield chunk(cur)
137 yield chunk(cur)
138
138
139 class fiter(object):
139 class fiter(object):
140 def __init__(self, fp):
140 def __init__(self, fp):
141 self.fp = fp
141 self.fp = fp
142
142
143 def __iter__(self):
143 def __iter__(self):
144 return self
144 return self
145
145
146 def next(self):
146 def next(self):
147 l = self.fp.readline()
147 l = self.fp.readline()
148 if not l:
148 if not l:
149 raise StopIteration
149 raise StopIteration
150 return l
150 return l
151
151
152 inheader = False
152 inheader = False
153 cur = []
153 cur = []
154
154
155 mimeheaders = ['content-type']
155 mimeheaders = ['content-type']
156
156
157 if not util.safehasattr(stream, 'next'):
157 if not util.safehasattr(stream, 'next'):
158 # http responses, for example, have readline but not next
158 # http responses, for example, have readline but not next
159 stream = fiter(stream)
159 stream = fiter(stream)
160
160
161 for line in stream:
161 for line in stream:
162 cur.append(line)
162 cur.append(line)
163 if line.startswith('# HG changeset patch'):
163 if line.startswith('# HG changeset patch'):
164 return hgsplit(stream, cur)
164 return hgsplit(stream, cur)
165 elif line.startswith('From '):
165 elif line.startswith('From '):
166 return mboxsplit(stream, cur)
166 return mboxsplit(stream, cur)
167 elif isheader(line, inheader):
167 elif isheader(line, inheader):
168 inheader = True
168 inheader = True
169 if line.split(':', 1)[0].lower() in mimeheaders:
169 if line.split(':', 1)[0].lower() in mimeheaders:
170 # let email parser handle this
170 # let email parser handle this
171 return mimesplit(stream, cur)
171 return mimesplit(stream, cur)
172 elif line.startswith('--- ') and inheader:
172 elif line.startswith('--- ') and inheader:
173 # No evil headers seen by diff start, split by hand
173 # No evil headers seen by diff start, split by hand
174 return headersplit(stream, cur)
174 return headersplit(stream, cur)
175 # Not enough info, keep reading
175 # Not enough info, keep reading
176
176
177 # if we are here, we have a very plain patch
177 # if we are here, we have a very plain patch
178 return remainder(cur)
178 return remainder(cur)
179
179
180 ## Some facility for extensible patch parsing:
180 ## Some facility for extensible patch parsing:
181 # list of pairs ("header to match", "data key")
181 # list of pairs ("header to match", "data key")
182 patchheadermap = [('Date', 'date'),
182 patchheadermap = [('Date', 'date'),
183 ('Branch', 'branch'),
183 ('Branch', 'branch'),
184 ('Node ID', 'nodeid'),
184 ('Node ID', 'nodeid'),
185 ]
185 ]
186
186
187 def extract(ui, fileobj):
187 def extract(ui, fileobj):
188 '''extract patch from data read from fileobj.
188 '''extract patch from data read from fileobj.
189
189
190 patch can be a normal patch or contained in an email message.
190 patch can be a normal patch or contained in an email message.
191
191
192 return a dictionary. Standard keys are:
192 return a dictionary. Standard keys are:
193 - filename,
193 - filename,
194 - message,
194 - message,
195 - user,
195 - user,
196 - date,
196 - date,
197 - branch,
197 - branch,
198 - node,
198 - node,
199 - p1,
199 - p1,
200 - p2.
200 - p2.
201 Any item can be missing from the dictionary. If filename is missing,
201 Any item can be missing from the dictionary. If filename is missing,
202 fileobj did not contain a patch. Caller must unlink filename when done.'''
202 fileobj did not contain a patch. Caller must unlink filename when done.'''
203
203
204 # attempt to detect the start of a patch
204 # attempt to detect the start of a patch
205 # (this heuristic is borrowed from quilt)
205 # (this heuristic is borrowed from quilt)
206 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
206 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
207 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
207 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
208 br'---[ \t].*?^\+\+\+[ \t]|'
208 br'---[ \t].*?^\+\+\+[ \t]|'
209 br'\*\*\*[ \t].*?^---[ \t])',
209 br'\*\*\*[ \t].*?^---[ \t])',
210 re.MULTILINE | re.DOTALL)
210 re.MULTILINE | re.DOTALL)
211
211
212 data = {}
212 data = {}
213 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
213 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
214 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
214 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
215 try:
215 try:
216 msg = email.Parser.Parser().parse(fileobj)
216 msg = email.Parser.Parser().parse(fileobj)
217
217
218 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
218 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
219 data['user'] = msg['From'] and mail.headdecode(msg['From'])
219 data['user'] = msg['From'] and mail.headdecode(msg['From'])
220 if not subject and not data['user']:
220 if not subject and not data['user']:
221 # Not an email, restore parsed headers if any
221 # Not an email, restore parsed headers if any
222 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
222 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
223
223
224 # should try to parse msg['Date']
224 # should try to parse msg['Date']
225 parents = []
225 parents = []
226
226
227 if subject:
227 if subject:
228 if subject.startswith('[PATCH'):
228 if subject.startswith('[PATCH'):
229 pend = subject.find(']')
229 pend = subject.find(']')
230 if pend >= 0:
230 if pend >= 0:
231 subject = subject[pend + 1:].lstrip()
231 subject = subject[pend + 1:].lstrip()
232 subject = re.sub(br'\n[ \t]+', ' ', subject)
232 subject = re.sub(br'\n[ \t]+', ' ', subject)
233 ui.debug('Subject: %s\n' % subject)
233 ui.debug('Subject: %s\n' % subject)
234 if data['user']:
234 if data['user']:
235 ui.debug('From: %s\n' % data['user'])
235 ui.debug('From: %s\n' % data['user'])
236 diffs_seen = 0
236 diffs_seen = 0
237 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
237 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
238 message = ''
238 message = ''
239 for part in msg.walk():
239 for part in msg.walk():
240 content_type = part.get_content_type()
240 content_type = part.get_content_type()
241 ui.debug('Content-Type: %s\n' % content_type)
241 ui.debug('Content-Type: %s\n' % content_type)
242 if content_type not in ok_types:
242 if content_type not in ok_types:
243 continue
243 continue
244 payload = part.get_payload(decode=True)
244 payload = part.get_payload(decode=True)
245 m = diffre.search(payload)
245 m = diffre.search(payload)
246 if m:
246 if m:
247 hgpatch = False
247 hgpatch = False
248 hgpatchheader = False
248 hgpatchheader = False
249 ignoretext = False
249 ignoretext = False
250
250
251 ui.debug('found patch at byte %d\n' % m.start(0))
251 ui.debug('found patch at byte %d\n' % m.start(0))
252 diffs_seen += 1
252 diffs_seen += 1
253 cfp = stringio()
253 cfp = stringio()
254 for line in payload[:m.start(0)].splitlines():
254 for line in payload[:m.start(0)].splitlines():
255 if line.startswith('# HG changeset patch') and not hgpatch:
255 if line.startswith('# HG changeset patch') and not hgpatch:
256 ui.debug('patch generated by hg export\n')
256 ui.debug('patch generated by hg export\n')
257 hgpatch = True
257 hgpatch = True
258 hgpatchheader = True
258 hgpatchheader = True
259 # drop earlier commit message content
259 # drop earlier commit message content
260 cfp.seek(0)
260 cfp.seek(0)
261 cfp.truncate()
261 cfp.truncate()
262 subject = None
262 subject = None
263 elif hgpatchheader:
263 elif hgpatchheader:
264 if line.startswith('# User '):
264 if line.startswith('# User '):
265 data['user'] = line[7:]
265 data['user'] = line[7:]
266 ui.debug('From: %s\n' % data['user'])
266 ui.debug('From: %s\n' % data['user'])
267 elif line.startswith("# Parent "):
267 elif line.startswith("# Parent "):
268 parents.append(line[9:].lstrip())
268 parents.append(line[9:].lstrip())
269 elif line.startswith("# "):
269 elif line.startswith("# "):
270 for header, key in patchheadermap:
270 for header, key in patchheadermap:
271 prefix = '# %s ' % header
271 prefix = '# %s ' % header
272 if line.startswith(prefix):
272 if line.startswith(prefix):
273 data[key] = line[len(prefix):]
273 data[key] = line[len(prefix):]
274 else:
274 else:
275 hgpatchheader = False
275 hgpatchheader = False
276 elif line == '---':
276 elif line == '---':
277 ignoretext = True
277 ignoretext = True
278 if not hgpatchheader and not ignoretext:
278 if not hgpatchheader and not ignoretext:
279 cfp.write(line)
279 cfp.write(line)
280 cfp.write('\n')
280 cfp.write('\n')
281 message = cfp.getvalue()
281 message = cfp.getvalue()
282 if tmpfp:
282 if tmpfp:
283 tmpfp.write(payload)
283 tmpfp.write(payload)
284 if not payload.endswith('\n'):
284 if not payload.endswith('\n'):
285 tmpfp.write('\n')
285 tmpfp.write('\n')
286 elif not diffs_seen and message and content_type == 'text/plain':
286 elif not diffs_seen and message and content_type == 'text/plain':
287 message += '\n' + payload
287 message += '\n' + payload
288 except: # re-raises
288 except: # re-raises
289 tmpfp.close()
289 tmpfp.close()
290 os.unlink(tmpname)
290 os.unlink(tmpname)
291 raise
291 raise
292
292
293 if subject and not message.startswith(subject):
293 if subject and not message.startswith(subject):
294 message = '%s\n%s' % (subject, message)
294 message = '%s\n%s' % (subject, message)
295 data['message'] = message
295 data['message'] = message
296 tmpfp.close()
296 tmpfp.close()
297 if parents:
297 if parents:
298 data['p1'] = parents.pop(0)
298 data['p1'] = parents.pop(0)
299 if parents:
299 if parents:
300 data['p2'] = parents.pop(0)
300 data['p2'] = parents.pop(0)
301
301
302 if diffs_seen:
302 if diffs_seen:
303 data['filename'] = tmpname
303 data['filename'] = tmpname
304 else:
304 else:
305 os.unlink(tmpname)
305 os.unlink(tmpname)
306 return data
306 return data
307
307
308 class patchmeta(object):
308 class patchmeta(object):
309 """Patched file metadata
309 """Patched file metadata
310
310
311 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
311 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
312 or COPY. 'path' is patched file path. 'oldpath' is set to the
312 or COPY. 'path' is patched file path. 'oldpath' is set to the
313 origin file when 'op' is either COPY or RENAME, None otherwise. If
313 origin file when 'op' is either COPY or RENAME, None otherwise. If
314 file mode is changed, 'mode' is a tuple (islink, isexec) where
314 file mode is changed, 'mode' is a tuple (islink, isexec) where
315 'islink' is True if the file is a symlink and 'isexec' is True if
315 'islink' is True if the file is a symlink and 'isexec' is True if
316 the file is executable. Otherwise, 'mode' is None.
316 the file is executable. Otherwise, 'mode' is None.
317 """
317 """
318 def __init__(self, path):
318 def __init__(self, path):
319 self.path = path
319 self.path = path
320 self.oldpath = None
320 self.oldpath = None
321 self.mode = None
321 self.mode = None
322 self.op = 'MODIFY'
322 self.op = 'MODIFY'
323 self.binary = False
323 self.binary = False
324
324
325 def setmode(self, mode):
325 def setmode(self, mode):
326 islink = mode & 0o20000
326 islink = mode & 0o20000
327 isexec = mode & 0o100
327 isexec = mode & 0o100
328 self.mode = (islink, isexec)
328 self.mode = (islink, isexec)
329
329
330 def copy(self):
330 def copy(self):
331 other = patchmeta(self.path)
331 other = patchmeta(self.path)
332 other.oldpath = self.oldpath
332 other.oldpath = self.oldpath
333 other.mode = self.mode
333 other.mode = self.mode
334 other.op = self.op
334 other.op = self.op
335 other.binary = self.binary
335 other.binary = self.binary
336 return other
336 return other
337
337
338 def _ispatchinga(self, afile):
338 def _ispatchinga(self, afile):
339 if afile == '/dev/null':
339 if afile == '/dev/null':
340 return self.op == 'ADD'
340 return self.op == 'ADD'
341 return afile == 'a/' + (self.oldpath or self.path)
341 return afile == 'a/' + (self.oldpath or self.path)
342
342
343 def _ispatchingb(self, bfile):
343 def _ispatchingb(self, bfile):
344 if bfile == '/dev/null':
344 if bfile == '/dev/null':
345 return self.op == 'DELETE'
345 return self.op == 'DELETE'
346 return bfile == 'b/' + self.path
346 return bfile == 'b/' + self.path
347
347
348 def ispatching(self, afile, bfile):
348 def ispatching(self, afile, bfile):
349 return self._ispatchinga(afile) and self._ispatchingb(bfile)
349 return self._ispatchinga(afile) and self._ispatchingb(bfile)
350
350
351 def __repr__(self):
351 def __repr__(self):
352 return "<patchmeta %s %r>" % (self.op, self.path)
352 return "<patchmeta %s %r>" % (self.op, self.path)
353
353
354 def readgitpatch(lr):
354 def readgitpatch(lr):
355 """extract git-style metadata about patches from <patchname>"""
355 """extract git-style metadata about patches from <patchname>"""
356
356
357 # Filter patch for git information
357 # Filter patch for git information
358 gp = None
358 gp = None
359 gitpatches = []
359 gitpatches = []
360 for line in lr:
360 for line in lr:
361 line = line.rstrip(' \r\n')
361 line = line.rstrip(' \r\n')
362 if line.startswith('diff --git a/'):
362 if line.startswith('diff --git a/'):
363 m = gitre.match(line)
363 m = gitre.match(line)
364 if m:
364 if m:
365 if gp:
365 if gp:
366 gitpatches.append(gp)
366 gitpatches.append(gp)
367 dst = m.group(2)
367 dst = m.group(2)
368 gp = patchmeta(dst)
368 gp = patchmeta(dst)
369 elif gp:
369 elif gp:
370 if line.startswith('--- '):
370 if line.startswith('--- '):
371 gitpatches.append(gp)
371 gitpatches.append(gp)
372 gp = None
372 gp = None
373 continue
373 continue
374 if line.startswith('rename from '):
374 if line.startswith('rename from '):
375 gp.op = 'RENAME'
375 gp.op = 'RENAME'
376 gp.oldpath = line[12:]
376 gp.oldpath = line[12:]
377 elif line.startswith('rename to '):
377 elif line.startswith('rename to '):
378 gp.path = line[10:]
378 gp.path = line[10:]
379 elif line.startswith('copy from '):
379 elif line.startswith('copy from '):
380 gp.op = 'COPY'
380 gp.op = 'COPY'
381 gp.oldpath = line[10:]
381 gp.oldpath = line[10:]
382 elif line.startswith('copy to '):
382 elif line.startswith('copy to '):
383 gp.path = line[8:]
383 gp.path = line[8:]
384 elif line.startswith('deleted file'):
384 elif line.startswith('deleted file'):
385 gp.op = 'DELETE'
385 gp.op = 'DELETE'
386 elif line.startswith('new file mode '):
386 elif line.startswith('new file mode '):
387 gp.op = 'ADD'
387 gp.op = 'ADD'
388 gp.setmode(int(line[-6:], 8))
388 gp.setmode(int(line[-6:], 8))
389 elif line.startswith('new mode '):
389 elif line.startswith('new mode '):
390 gp.setmode(int(line[-6:], 8))
390 gp.setmode(int(line[-6:], 8))
391 elif line.startswith('GIT binary patch'):
391 elif line.startswith('GIT binary patch'):
392 gp.binary = True
392 gp.binary = True
393 if gp:
393 if gp:
394 gitpatches.append(gp)
394 gitpatches.append(gp)
395
395
396 return gitpatches
396 return gitpatches
397
397
398 class linereader(object):
398 class linereader(object):
399 # simple class to allow pushing lines back into the input stream
399 # simple class to allow pushing lines back into the input stream
400 def __init__(self, fp):
400 def __init__(self, fp):
401 self.fp = fp
401 self.fp = fp
402 self.buf = []
402 self.buf = []
403
403
404 def push(self, line):
404 def push(self, line):
405 if line is not None:
405 if line is not None:
406 self.buf.append(line)
406 self.buf.append(line)
407
407
408 def readline(self):
408 def readline(self):
409 if self.buf:
409 if self.buf:
410 l = self.buf[0]
410 l = self.buf[0]
411 del self.buf[0]
411 del self.buf[0]
412 return l
412 return l
413 return self.fp.readline()
413 return self.fp.readline()
414
414
415 def __iter__(self):
415 def __iter__(self):
416 return iter(self.readline, '')
416 return iter(self.readline, '')
417
417
418 class abstractbackend(object):
418 class abstractbackend(object):
419 def __init__(self, ui):
419 def __init__(self, ui):
420 self.ui = ui
420 self.ui = ui
421
421
422 def getfile(self, fname):
422 def getfile(self, fname):
423 """Return target file data and flags as a (data, (islink,
423 """Return target file data and flags as a (data, (islink,
424 isexec)) tuple. Data is None if file is missing/deleted.
424 isexec)) tuple. Data is None if file is missing/deleted.
425 """
425 """
426 raise NotImplementedError
426 raise NotImplementedError
427
427
428 def setfile(self, fname, data, mode, copysource):
428 def setfile(self, fname, data, mode, copysource):
429 """Write data to target file fname and set its mode. mode is a
429 """Write data to target file fname and set its mode. mode is a
430 (islink, isexec) tuple. If data is None, the file content should
430 (islink, isexec) tuple. If data is None, the file content should
431 be left unchanged. If the file is modified after being copied,
431 be left unchanged. If the file is modified after being copied,
432 copysource is set to the original file name.
432 copysource is set to the original file name.
433 """
433 """
434 raise NotImplementedError
434 raise NotImplementedError
435
435
436 def unlink(self, fname):
436 def unlink(self, fname):
437 """Unlink target file."""
437 """Unlink target file."""
438 raise NotImplementedError
438 raise NotImplementedError
439
439
440 def writerej(self, fname, failed, total, lines):
440 def writerej(self, fname, failed, total, lines):
441 """Write rejected lines for fname. total is the number of hunks
441 """Write rejected lines for fname. total is the number of hunks
442 which failed to apply and total the total number of hunks for this
442 which failed to apply and total the total number of hunks for this
443 files.
443 files.
444 """
444 """
445 pass
446
445
447 def exists(self, fname):
446 def exists(self, fname):
448 raise NotImplementedError
447 raise NotImplementedError
449
448
450 def close(self):
449 def close(self):
451 raise NotImplementedError
450 raise NotImplementedError
452
451
453 class fsbackend(abstractbackend):
452 class fsbackend(abstractbackend):
454 def __init__(self, ui, basedir):
453 def __init__(self, ui, basedir):
455 super(fsbackend, self).__init__(ui)
454 super(fsbackend, self).__init__(ui)
456 self.opener = vfsmod.vfs(basedir)
455 self.opener = vfsmod.vfs(basedir)
457
456
458 def getfile(self, fname):
457 def getfile(self, fname):
459 if self.opener.islink(fname):
458 if self.opener.islink(fname):
460 return (self.opener.readlink(fname), (True, False))
459 return (self.opener.readlink(fname), (True, False))
461
460
462 isexec = False
461 isexec = False
463 try:
462 try:
464 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
465 except OSError as e:
464 except OSError as e:
466 if e.errno != errno.ENOENT:
465 if e.errno != errno.ENOENT:
467 raise
466 raise
468 try:
467 try:
469 return (self.opener.read(fname), (False, isexec))
468 return (self.opener.read(fname), (False, isexec))
470 except IOError as e:
469 except IOError as e:
471 if e.errno != errno.ENOENT:
470 if e.errno != errno.ENOENT:
472 raise
471 raise
473 return None, None
472 return None, None
474
473
475 def setfile(self, fname, data, mode, copysource):
474 def setfile(self, fname, data, mode, copysource):
476 islink, isexec = mode
475 islink, isexec = mode
477 if data is None:
476 if data is None:
478 self.opener.setflags(fname, islink, isexec)
477 self.opener.setflags(fname, islink, isexec)
479 return
478 return
480 if islink:
479 if islink:
481 self.opener.symlink(data, fname)
480 self.opener.symlink(data, fname)
482 else:
481 else:
483 self.opener.write(fname, data)
482 self.opener.write(fname, data)
484 if isexec:
483 if isexec:
485 self.opener.setflags(fname, False, True)
484 self.opener.setflags(fname, False, True)
486
485
487 def unlink(self, fname):
486 def unlink(self, fname):
488 self.opener.unlinkpath(fname, ignoremissing=True)
487 self.opener.unlinkpath(fname, ignoremissing=True)
489
488
490 def writerej(self, fname, failed, total, lines):
489 def writerej(self, fname, failed, total, lines):
491 fname = fname + ".rej"
490 fname = fname + ".rej"
492 self.ui.warn(
491 self.ui.warn(
493 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
494 (failed, total, fname))
493 (failed, total, fname))
495 fp = self.opener(fname, 'w')
494 fp = self.opener(fname, 'w')
496 fp.writelines(lines)
495 fp.writelines(lines)
497 fp.close()
496 fp.close()
498
497
499 def exists(self, fname):
498 def exists(self, fname):
500 return self.opener.lexists(fname)
499 return self.opener.lexists(fname)
501
500
502 class workingbackend(fsbackend):
501 class workingbackend(fsbackend):
503 def __init__(self, ui, repo, similarity):
502 def __init__(self, ui, repo, similarity):
504 super(workingbackend, self).__init__(ui, repo.root)
503 super(workingbackend, self).__init__(ui, repo.root)
505 self.repo = repo
504 self.repo = repo
506 self.similarity = similarity
505 self.similarity = similarity
507 self.removed = set()
506 self.removed = set()
508 self.changed = set()
507 self.changed = set()
509 self.copied = []
508 self.copied = []
510
509
511 def _checkknown(self, fname):
510 def _checkknown(self, fname):
512 if self.repo.dirstate[fname] == '?' and self.exists(fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
513 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
514
513
515 def setfile(self, fname, data, mode, copysource):
514 def setfile(self, fname, data, mode, copysource):
516 self._checkknown(fname)
515 self._checkknown(fname)
517 super(workingbackend, self).setfile(fname, data, mode, copysource)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
518 if copysource is not None:
517 if copysource is not None:
519 self.copied.append((copysource, fname))
518 self.copied.append((copysource, fname))
520 self.changed.add(fname)
519 self.changed.add(fname)
521
520
522 def unlink(self, fname):
521 def unlink(self, fname):
523 self._checkknown(fname)
522 self._checkknown(fname)
524 super(workingbackend, self).unlink(fname)
523 super(workingbackend, self).unlink(fname)
525 self.removed.add(fname)
524 self.removed.add(fname)
526 self.changed.add(fname)
525 self.changed.add(fname)
527
526
528 def close(self):
527 def close(self):
529 wctx = self.repo[None]
528 wctx = self.repo[None]
530 changed = set(self.changed)
529 changed = set(self.changed)
531 for src, dst in self.copied:
530 for src, dst in self.copied:
532 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
533 if self.removed:
532 if self.removed:
534 wctx.forget(sorted(self.removed))
533 wctx.forget(sorted(self.removed))
535 for f in self.removed:
534 for f in self.removed:
536 if f not in self.repo.dirstate:
535 if f not in self.repo.dirstate:
537 # File was deleted and no longer belongs to the
536 # File was deleted and no longer belongs to the
538 # dirstate, it was probably marked added then
537 # dirstate, it was probably marked added then
539 # deleted, and should not be considered by
538 # deleted, and should not be considered by
540 # marktouched().
539 # marktouched().
541 changed.discard(f)
540 changed.discard(f)
542 if changed:
541 if changed:
543 scmutil.marktouched(self.repo, changed, self.similarity)
542 scmutil.marktouched(self.repo, changed, self.similarity)
544 return sorted(self.changed)
543 return sorted(self.changed)
545
544
546 class filestore(object):
545 class filestore(object):
547 def __init__(self, maxsize=None):
546 def __init__(self, maxsize=None):
548 self.opener = None
547 self.opener = None
549 self.files = {}
548 self.files = {}
550 self.created = 0
549 self.created = 0
551 self.maxsize = maxsize
550 self.maxsize = maxsize
552 if self.maxsize is None:
551 if self.maxsize is None:
553 self.maxsize = 4*(2**20)
552 self.maxsize = 4*(2**20)
554 self.size = 0
553 self.size = 0
555 self.data = {}
554 self.data = {}
556
555
557 def setfile(self, fname, data, mode, copied=None):
556 def setfile(self, fname, data, mode, copied=None):
558 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
559 self.data[fname] = (data, mode, copied)
558 self.data[fname] = (data, mode, copied)
560 self.size += len(data)
559 self.size += len(data)
561 else:
560 else:
562 if self.opener is None:
561 if self.opener is None:
563 root = tempfile.mkdtemp(prefix='hg-patch-')
562 root = tempfile.mkdtemp(prefix='hg-patch-')
564 self.opener = vfsmod.vfs(root)
563 self.opener = vfsmod.vfs(root)
565 # Avoid filename issues with these simple names
564 # Avoid filename issues with these simple names
566 fn = str(self.created)
565 fn = str(self.created)
567 self.opener.write(fn, data)
566 self.opener.write(fn, data)
568 self.created += 1
567 self.created += 1
569 self.files[fname] = (fn, mode, copied)
568 self.files[fname] = (fn, mode, copied)
570
569
571 def getfile(self, fname):
570 def getfile(self, fname):
572 if fname in self.data:
571 if fname in self.data:
573 return self.data[fname]
572 return self.data[fname]
574 if not self.opener or fname not in self.files:
573 if not self.opener or fname not in self.files:
575 return None, None, None
574 return None, None, None
576 fn, mode, copied = self.files[fname]
575 fn, mode, copied = self.files[fname]
577 return self.opener.read(fn), mode, copied
576 return self.opener.read(fn), mode, copied
578
577
579 def close(self):
578 def close(self):
580 if self.opener:
579 if self.opener:
581 shutil.rmtree(self.opener.base)
580 shutil.rmtree(self.opener.base)
582
581
583 class repobackend(abstractbackend):
582 class repobackend(abstractbackend):
584 def __init__(self, ui, repo, ctx, store):
583 def __init__(self, ui, repo, ctx, store):
585 super(repobackend, self).__init__(ui)
584 super(repobackend, self).__init__(ui)
586 self.repo = repo
585 self.repo = repo
587 self.ctx = ctx
586 self.ctx = ctx
588 self.store = store
587 self.store = store
589 self.changed = set()
588 self.changed = set()
590 self.removed = set()
589 self.removed = set()
591 self.copied = {}
590 self.copied = {}
592
591
593 def _checkknown(self, fname):
592 def _checkknown(self, fname):
594 if fname not in self.ctx:
593 if fname not in self.ctx:
595 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
596
595
597 def getfile(self, fname):
596 def getfile(self, fname):
598 try:
597 try:
599 fctx = self.ctx[fname]
598 fctx = self.ctx[fname]
600 except error.LookupError:
599 except error.LookupError:
601 return None, None
600 return None, None
602 flags = fctx.flags()
601 flags = fctx.flags()
603 return fctx.data(), ('l' in flags, 'x' in flags)
602 return fctx.data(), ('l' in flags, 'x' in flags)
604
603
605 def setfile(self, fname, data, mode, copysource):
604 def setfile(self, fname, data, mode, copysource):
606 if copysource:
605 if copysource:
607 self._checkknown(copysource)
606 self._checkknown(copysource)
608 if data is None:
607 if data is None:
609 data = self.ctx[fname].data()
608 data = self.ctx[fname].data()
610 self.store.setfile(fname, data, mode, copysource)
609 self.store.setfile(fname, data, mode, copysource)
611 self.changed.add(fname)
610 self.changed.add(fname)
612 if copysource:
611 if copysource:
613 self.copied[fname] = copysource
612 self.copied[fname] = copysource
614
613
615 def unlink(self, fname):
614 def unlink(self, fname):
616 self._checkknown(fname)
615 self._checkknown(fname)
617 self.removed.add(fname)
616 self.removed.add(fname)
618
617
619 def exists(self, fname):
618 def exists(self, fname):
620 return fname in self.ctx
619 return fname in self.ctx
621
620
622 def close(self):
621 def close(self):
623 return self.changed | self.removed
622 return self.changed | self.removed
624
623
625 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
626 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
627 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
628 eolmodes = ['strict', 'crlf', 'lf', 'auto']
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
629
628
630 class patchfile(object):
629 class patchfile(object):
631 def __init__(self, ui, gp, backend, store, eolmode='strict'):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
632 self.fname = gp.path
631 self.fname = gp.path
633 self.eolmode = eolmode
632 self.eolmode = eolmode
634 self.eol = None
633 self.eol = None
635 self.backend = backend
634 self.backend = backend
636 self.ui = ui
635 self.ui = ui
637 self.lines = []
636 self.lines = []
638 self.exists = False
637 self.exists = False
639 self.missing = True
638 self.missing = True
640 self.mode = gp.mode
639 self.mode = gp.mode
641 self.copysource = gp.oldpath
640 self.copysource = gp.oldpath
642 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
643 self.remove = gp.op == 'DELETE'
642 self.remove = gp.op == 'DELETE'
644 if self.copysource is None:
643 if self.copysource is None:
645 data, mode = backend.getfile(self.fname)
644 data, mode = backend.getfile(self.fname)
646 else:
645 else:
647 data, mode = store.getfile(self.copysource)[:2]
646 data, mode = store.getfile(self.copysource)[:2]
648 if data is not None:
647 if data is not None:
649 self.exists = self.copysource is None or backend.exists(self.fname)
648 self.exists = self.copysource is None or backend.exists(self.fname)
650 self.missing = False
649 self.missing = False
651 if data:
650 if data:
652 self.lines = mdiff.splitnewlines(data)
651 self.lines = mdiff.splitnewlines(data)
653 if self.mode is None:
652 if self.mode is None:
654 self.mode = mode
653 self.mode = mode
655 if self.lines:
654 if self.lines:
656 # Normalize line endings
655 # Normalize line endings
657 if self.lines[0].endswith('\r\n'):
656 if self.lines[0].endswith('\r\n'):
658 self.eol = '\r\n'
657 self.eol = '\r\n'
659 elif self.lines[0].endswith('\n'):
658 elif self.lines[0].endswith('\n'):
660 self.eol = '\n'
659 self.eol = '\n'
661 if eolmode != 'strict':
660 if eolmode != 'strict':
662 nlines = []
661 nlines = []
663 for l in self.lines:
662 for l in self.lines:
664 if l.endswith('\r\n'):
663 if l.endswith('\r\n'):
665 l = l[:-2] + '\n'
664 l = l[:-2] + '\n'
666 nlines.append(l)
665 nlines.append(l)
667 self.lines = nlines
666 self.lines = nlines
668 else:
667 else:
669 if self.create:
668 if self.create:
670 self.missing = False
669 self.missing = False
671 if self.mode is None:
670 if self.mode is None:
672 self.mode = (False, False)
671 self.mode = (False, False)
673 if self.missing:
672 if self.missing:
674 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
675 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
674 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
676 "current directory)\n"))
675 "current directory)\n"))
677
676
678 self.hash = {}
677 self.hash = {}
679 self.dirty = 0
678 self.dirty = 0
680 self.offset = 0
679 self.offset = 0
681 self.skew = 0
680 self.skew = 0
682 self.rej = []
681 self.rej = []
683 self.fileprinted = False
682 self.fileprinted = False
684 self.printfile(False)
683 self.printfile(False)
685 self.hunks = 0
684 self.hunks = 0
686
685
687 def writelines(self, fname, lines, mode):
686 def writelines(self, fname, lines, mode):
688 if self.eolmode == 'auto':
687 if self.eolmode == 'auto':
689 eol = self.eol
688 eol = self.eol
690 elif self.eolmode == 'crlf':
689 elif self.eolmode == 'crlf':
691 eol = '\r\n'
690 eol = '\r\n'
692 else:
691 else:
693 eol = '\n'
692 eol = '\n'
694
693
695 if self.eolmode != 'strict' and eol and eol != '\n':
694 if self.eolmode != 'strict' and eol and eol != '\n':
696 rawlines = []
695 rawlines = []
697 for l in lines:
696 for l in lines:
698 if l and l[-1] == '\n':
697 if l and l[-1] == '\n':
699 l = l[:-1] + eol
698 l = l[:-1] + eol
700 rawlines.append(l)
699 rawlines.append(l)
701 lines = rawlines
700 lines = rawlines
702
701
703 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
702 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704
703
705 def printfile(self, warn):
704 def printfile(self, warn):
706 if self.fileprinted:
705 if self.fileprinted:
707 return
706 return
708 if warn or self.ui.verbose:
707 if warn or self.ui.verbose:
709 self.fileprinted = True
708 self.fileprinted = True
710 s = _("patching file %s\n") % self.fname
709 s = _("patching file %s\n") % self.fname
711 if warn:
710 if warn:
712 self.ui.warn(s)
711 self.ui.warn(s)
713 else:
712 else:
714 self.ui.note(s)
713 self.ui.note(s)
715
714
716
715
717 def findlines(self, l, linenum):
716 def findlines(self, l, linenum):
718 # looks through the hash and finds candidate lines. The
717 # looks through the hash and finds candidate lines. The
719 # result is a list of line numbers sorted based on distance
718 # result is a list of line numbers sorted based on distance
720 # from linenum
719 # from linenum
721
720
722 cand = self.hash.get(l, [])
721 cand = self.hash.get(l, [])
723 if len(cand) > 1:
722 if len(cand) > 1:
724 # resort our list of potentials forward then back.
723 # resort our list of potentials forward then back.
725 cand.sort(key=lambda x: abs(x - linenum))
724 cand.sort(key=lambda x: abs(x - linenum))
726 return cand
725 return cand
727
726
728 def write_rej(self):
727 def write_rej(self):
729 # our rejects are a little different from patch(1). This always
728 # our rejects are a little different from patch(1). This always
730 # creates rejects in the same form as the original patch. A file
729 # creates rejects in the same form as the original patch. A file
731 # header is inserted so that you can run the reject through patch again
730 # header is inserted so that you can run the reject through patch again
732 # without having to type the filename.
731 # without having to type the filename.
733 if not self.rej:
732 if not self.rej:
734 return
733 return
735 base = os.path.basename(self.fname)
734 base = os.path.basename(self.fname)
736 lines = ["--- %s\n+++ %s\n" % (base, base)]
735 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 for x in self.rej:
736 for x in self.rej:
738 for l in x.hunk:
737 for l in x.hunk:
739 lines.append(l)
738 lines.append(l)
740 if l[-1:] != '\n':
739 if l[-1:] != '\n':
741 lines.append("\n\ No newline at end of file\n")
740 lines.append("\n\ No newline at end of file\n")
742 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
741 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743
742
744 def apply(self, h):
743 def apply(self, h):
745 if not h.complete():
744 if not h.complete():
746 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
745 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 (h.number, h.desc, len(h.a), h.lena, len(h.b),
746 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 h.lenb))
747 h.lenb))
749
748
750 self.hunks += 1
749 self.hunks += 1
751
750
752 if self.missing:
751 if self.missing:
753 self.rej.append(h)
752 self.rej.append(h)
754 return -1
753 return -1
755
754
756 if self.exists and self.create:
755 if self.exists and self.create:
757 if self.copysource:
756 if self.copysource:
758 self.ui.warn(_("cannot create %s: destination already "
757 self.ui.warn(_("cannot create %s: destination already "
759 "exists\n") % self.fname)
758 "exists\n") % self.fname)
760 else:
759 else:
761 self.ui.warn(_("file %s already exists\n") % self.fname)
760 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.rej.append(h)
761 self.rej.append(h)
763 return -1
762 return -1
764
763
765 if isinstance(h, binhunk):
764 if isinstance(h, binhunk):
766 if self.remove:
765 if self.remove:
767 self.backend.unlink(self.fname)
766 self.backend.unlink(self.fname)
768 else:
767 else:
769 l = h.new(self.lines)
768 l = h.new(self.lines)
770 self.lines[:] = l
769 self.lines[:] = l
771 self.offset += len(l)
770 self.offset += len(l)
772 self.dirty = True
771 self.dirty = True
773 return 0
772 return 0
774
773
775 horig = h
774 horig = h
776 if (self.eolmode in ('crlf', 'lf')
775 if (self.eolmode in ('crlf', 'lf')
777 or self.eolmode == 'auto' and self.eol):
776 or self.eolmode == 'auto' and self.eol):
778 # If new eols are going to be normalized, then normalize
777 # If new eols are going to be normalized, then normalize
779 # hunk data before patching. Otherwise, preserve input
778 # hunk data before patching. Otherwise, preserve input
780 # line-endings.
779 # line-endings.
781 h = h.getnormalized()
780 h = h.getnormalized()
782
781
783 # fast case first, no offsets, no fuzz
782 # fast case first, no offsets, no fuzz
784 old, oldstart, new, newstart = h.fuzzit(0, False)
783 old, oldstart, new, newstart = h.fuzzit(0, False)
785 oldstart += self.offset
784 oldstart += self.offset
786 orig_start = oldstart
785 orig_start = oldstart
787 # if there's skew we want to emit the "(offset %d lines)" even
786 # if there's skew we want to emit the "(offset %d lines)" even
788 # when the hunk cleanly applies at start + skew, so skip the
787 # when the hunk cleanly applies at start + skew, so skip the
789 # fast case code
788 # fast case code
790 if (self.skew == 0 and
789 if (self.skew == 0 and
791 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
790 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 if self.remove:
791 if self.remove:
793 self.backend.unlink(self.fname)
792 self.backend.unlink(self.fname)
794 else:
793 else:
795 self.lines[oldstart:oldstart + len(old)] = new
794 self.lines[oldstart:oldstart + len(old)] = new
796 self.offset += len(new) - len(old)
795 self.offset += len(new) - len(old)
797 self.dirty = True
796 self.dirty = True
798 return 0
797 return 0
799
798
800 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
799 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 self.hash = {}
800 self.hash = {}
802 for x, s in enumerate(self.lines):
801 for x, s in enumerate(self.lines):
803 self.hash.setdefault(s, []).append(x)
802 self.hash.setdefault(s, []).append(x)
804
803
805 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
804 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
806 for toponly in [True, False]:
805 for toponly in [True, False]:
807 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
806 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 oldstart = oldstart + self.offset + self.skew
807 oldstart = oldstart + self.offset + self.skew
809 oldstart = min(oldstart, len(self.lines))
808 oldstart = min(oldstart, len(self.lines))
810 if old:
809 if old:
811 cand = self.findlines(old[0][1:], oldstart)
810 cand = self.findlines(old[0][1:], oldstart)
812 else:
811 else:
813 # Only adding lines with no or fuzzed context, just
812 # Only adding lines with no or fuzzed context, just
814 # take the skew in account
813 # take the skew in account
815 cand = [oldstart]
814 cand = [oldstart]
816
815
817 for l in cand:
816 for l in cand:
818 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
817 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 self.lines[l : l + len(old)] = new
818 self.lines[l : l + len(old)] = new
820 self.offset += len(new) - len(old)
819 self.offset += len(new) - len(old)
821 self.skew = l - orig_start
820 self.skew = l - orig_start
822 self.dirty = True
821 self.dirty = True
823 offset = l - orig_start - fuzzlen
822 offset = l - orig_start - fuzzlen
824 if fuzzlen:
823 if fuzzlen:
825 msg = _("Hunk #%d succeeded at %d "
824 msg = _("Hunk #%d succeeded at %d "
826 "with fuzz %d "
825 "with fuzz %d "
827 "(offset %d lines).\n")
826 "(offset %d lines).\n")
828 self.printfile(True)
827 self.printfile(True)
829 self.ui.warn(msg %
828 self.ui.warn(msg %
830 (h.number, l + 1, fuzzlen, offset))
829 (h.number, l + 1, fuzzlen, offset))
831 else:
830 else:
832 msg = _("Hunk #%d succeeded at %d "
831 msg = _("Hunk #%d succeeded at %d "
833 "(offset %d lines).\n")
832 "(offset %d lines).\n")
834 self.ui.note(msg % (h.number, l + 1, offset))
833 self.ui.note(msg % (h.number, l + 1, offset))
835 return fuzzlen
834 return fuzzlen
836 self.printfile(True)
835 self.printfile(True)
837 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
836 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.rej.append(horig)
837 self.rej.append(horig)
839 return -1
838 return -1
840
839
841 def close(self):
840 def close(self):
842 if self.dirty:
841 if self.dirty:
843 self.writelines(self.fname, self.lines, self.mode)
842 self.writelines(self.fname, self.lines, self.mode)
844 self.write_rej()
843 self.write_rej()
845 return len(self.rej)
844 return len(self.rej)
846
845
847 class header(object):
846 class header(object):
848 """patch header
847 """patch header
849 """
848 """
850 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
849 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diff_re = re.compile('diff -r .* (.*)$')
850 diff_re = re.compile('diff -r .* (.*)$')
852 allhunks_re = re.compile('(?:index|deleted file) ')
851 allhunks_re = re.compile('(?:index|deleted file) ')
853 pretty_re = re.compile('(?:new file|deleted file) ')
852 pretty_re = re.compile('(?:new file|deleted file) ')
854 special_re = re.compile('(?:index|deleted|copy|rename) ')
853 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 newfile_re = re.compile('(?:new file)')
854 newfile_re = re.compile('(?:new file)')
856
855
857 def __init__(self, header):
856 def __init__(self, header):
858 self.header = header
857 self.header = header
859 self.hunks = []
858 self.hunks = []
860
859
861 def binary(self):
860 def binary(self):
862 return any(h.startswith('index ') for h in self.header)
861 return any(h.startswith('index ') for h in self.header)
863
862
864 def pretty(self, fp):
863 def pretty(self, fp):
865 for h in self.header:
864 for h in self.header:
866 if h.startswith('index '):
865 if h.startswith('index '):
867 fp.write(_('this modifies a binary file (all or nothing)\n'))
866 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 break
867 break
869 if self.pretty_re.match(h):
868 if self.pretty_re.match(h):
870 fp.write(h)
869 fp.write(h)
871 if self.binary():
870 if self.binary():
872 fp.write(_('this is a binary file\n'))
871 fp.write(_('this is a binary file\n'))
873 break
872 break
874 if h.startswith('---'):
873 if h.startswith('---'):
875 fp.write(_('%d hunks, %d lines changed\n') %
874 fp.write(_('%d hunks, %d lines changed\n') %
876 (len(self.hunks),
875 (len(self.hunks),
877 sum([max(h.added, h.removed) for h in self.hunks])))
876 sum([max(h.added, h.removed) for h in self.hunks])))
878 break
877 break
879 fp.write(h)
878 fp.write(h)
880
879
881 def write(self, fp):
880 def write(self, fp):
882 fp.write(''.join(self.header))
881 fp.write(''.join(self.header))
883
882
884 def allhunks(self):
883 def allhunks(self):
885 return any(self.allhunks_re.match(h) for h in self.header)
884 return any(self.allhunks_re.match(h) for h in self.header)
886
885
887 def files(self):
886 def files(self):
888 match = self.diffgit_re.match(self.header[0])
887 match = self.diffgit_re.match(self.header[0])
889 if match:
888 if match:
890 fromfile, tofile = match.groups()
889 fromfile, tofile = match.groups()
891 if fromfile == tofile:
890 if fromfile == tofile:
892 return [fromfile]
891 return [fromfile]
893 return [fromfile, tofile]
892 return [fromfile, tofile]
894 else:
893 else:
895 return self.diff_re.match(self.header[0]).groups()
894 return self.diff_re.match(self.header[0]).groups()
896
895
897 def filename(self):
896 def filename(self):
898 return self.files()[-1]
897 return self.files()[-1]
899
898
900 def __repr__(self):
899 def __repr__(self):
901 return '<header %s>' % (' '.join(map(repr, self.files())))
900 return '<header %s>' % (' '.join(map(repr, self.files())))
902
901
903 def isnewfile(self):
902 def isnewfile(self):
904 return any(self.newfile_re.match(h) for h in self.header)
903 return any(self.newfile_re.match(h) for h in self.header)
905
904
906 def special(self):
905 def special(self):
907 # Special files are shown only at the header level and not at the hunk
906 # Special files are shown only at the header level and not at the hunk
908 # level for example a file that has been deleted is a special file.
907 # level for example a file that has been deleted is a special file.
909 # The user cannot change the content of the operation, in the case of
908 # The user cannot change the content of the operation, in the case of
910 # the deleted file he has to take the deletion or not take it, he
909 # the deleted file he has to take the deletion or not take it, he
911 # cannot take some of it.
910 # cannot take some of it.
912 # Newly added files are special if they are empty, they are not special
911 # Newly added files are special if they are empty, they are not special
913 # if they have some content as we want to be able to change it
912 # if they have some content as we want to be able to change it
914 nocontent = len(self.header) == 2
913 nocontent = len(self.header) == 2
915 emptynewfile = self.isnewfile() and nocontent
914 emptynewfile = self.isnewfile() and nocontent
916 return emptynewfile or \
915 return emptynewfile or \
917 any(self.special_re.match(h) for h in self.header)
916 any(self.special_re.match(h) for h in self.header)
918
917
919 class recordhunk(object):
918 class recordhunk(object):
920 """patch hunk
919 """patch hunk
921
920
922 XXX shouldn't we merge this with the other hunk class?
921 XXX shouldn't we merge this with the other hunk class?
923 """
922 """
924
923
925 def __init__(self, header, fromline, toline, proc, before, hunk, after,
924 def __init__(self, header, fromline, toline, proc, before, hunk, after,
926 maxcontext=None):
925 maxcontext=None):
927 def trimcontext(lines, reverse=False):
926 def trimcontext(lines, reverse=False):
928 if maxcontext is not None:
927 if maxcontext is not None:
929 delta = len(lines) - maxcontext
928 delta = len(lines) - maxcontext
930 if delta > 0:
929 if delta > 0:
931 if reverse:
930 if reverse:
932 return delta, lines[delta:]
931 return delta, lines[delta:]
933 else:
932 else:
934 return delta, lines[:maxcontext]
933 return delta, lines[:maxcontext]
935 return 0, lines
934 return 0, lines
936
935
937 self.header = header
936 self.header = header
938 trimedbefore, self.before = trimcontext(before, True)
937 trimedbefore, self.before = trimcontext(before, True)
939 self.fromline = fromline + trimedbefore
938 self.fromline = fromline + trimedbefore
940 self.toline = toline + trimedbefore
939 self.toline = toline + trimedbefore
941 _trimedafter, self.after = trimcontext(after, False)
940 _trimedafter, self.after = trimcontext(after, False)
942 self.proc = proc
941 self.proc = proc
943 self.hunk = hunk
942 self.hunk = hunk
944 self.added, self.removed = self.countchanges(self.hunk)
943 self.added, self.removed = self.countchanges(self.hunk)
945
944
946 def __eq__(self, v):
945 def __eq__(self, v):
947 if not isinstance(v, recordhunk):
946 if not isinstance(v, recordhunk):
948 return False
947 return False
949
948
950 return ((v.hunk == self.hunk) and
949 return ((v.hunk == self.hunk) and
951 (v.proc == self.proc) and
950 (v.proc == self.proc) and
952 (self.fromline == v.fromline) and
951 (self.fromline == v.fromline) and
953 (self.header.files() == v.header.files()))
952 (self.header.files() == v.header.files()))
954
953
955 def __hash__(self):
954 def __hash__(self):
956 return hash((tuple(self.hunk),
955 return hash((tuple(self.hunk),
957 tuple(self.header.files()),
956 tuple(self.header.files()),
958 self.fromline,
957 self.fromline,
959 self.proc))
958 self.proc))
960
959
961 def countchanges(self, hunk):
960 def countchanges(self, hunk):
962 """hunk -> (n+,n-)"""
961 """hunk -> (n+,n-)"""
963 add = len([h for h in hunk if h.startswith('+')])
962 add = len([h for h in hunk if h.startswith('+')])
964 rem = len([h for h in hunk if h.startswith('-')])
963 rem = len([h for h in hunk if h.startswith('-')])
965 return add, rem
964 return add, rem
966
965
967 def reversehunk(self):
966 def reversehunk(self):
968 """return another recordhunk which is the reverse of the hunk
967 """return another recordhunk which is the reverse of the hunk
969
968
970 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
969 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
971 that, swap fromline/toline and +/- signs while keep other things
970 that, swap fromline/toline and +/- signs while keep other things
972 unchanged.
971 unchanged.
973 """
972 """
974 m = {'+': '-', '-': '+', '\\': '\\'}
973 m = {'+': '-', '-': '+', '\\': '\\'}
975 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
974 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
976 return recordhunk(self.header, self.toline, self.fromline, self.proc,
975 return recordhunk(self.header, self.toline, self.fromline, self.proc,
977 self.before, hunk, self.after)
976 self.before, hunk, self.after)
978
977
979 def write(self, fp):
978 def write(self, fp):
980 delta = len(self.before) + len(self.after)
979 delta = len(self.before) + len(self.after)
981 if self.after and self.after[-1] == '\\ No newline at end of file\n':
980 if self.after and self.after[-1] == '\\ No newline at end of file\n':
982 delta -= 1
981 delta -= 1
983 fromlen = delta + self.removed
982 fromlen = delta + self.removed
984 tolen = delta + self.added
983 tolen = delta + self.added
985 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
984 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
986 (self.fromline, fromlen, self.toline, tolen,
985 (self.fromline, fromlen, self.toline, tolen,
987 self.proc and (' ' + self.proc)))
986 self.proc and (' ' + self.proc)))
988 fp.write(''.join(self.before + self.hunk + self.after))
987 fp.write(''.join(self.before + self.hunk + self.after))
989
988
990 pretty = write
989 pretty = write
991
990
992 def filename(self):
991 def filename(self):
993 return self.header.filename()
992 return self.header.filename()
994
993
995 def __repr__(self):
994 def __repr__(self):
996 return '<hunk %r@%d>' % (self.filename(), self.fromline)
995 return '<hunk %r@%d>' % (self.filename(), self.fromline)
997
996
998 messages = {
997 messages = {
999 'multiple': {
998 'multiple': {
1000 'discard': _("discard change %d/%d to '%s'?"),
999 'discard': _("discard change %d/%d to '%s'?"),
1001 'record': _("record change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1002 'revert': _("revert change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1003 },
1002 },
1004 'single': {
1003 'single': {
1005 'discard': _("discard this change to '%s'?"),
1004 'discard': _("discard this change to '%s'?"),
1006 'record': _("record this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1007 'revert': _("revert this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1008 },
1007 },
1009 'help': {
1008 'help': {
1010 'discard': _('[Ynesfdaq?]'
1009 'discard': _('[Ynesfdaq?]'
1011 '$$ &Yes, discard this change'
1010 '$$ &Yes, discard this change'
1012 '$$ &No, skip this change'
1011 '$$ &No, skip this change'
1013 '$$ &Edit this change manually'
1012 '$$ &Edit this change manually'
1014 '$$ &Skip remaining changes to this file'
1013 '$$ &Skip remaining changes to this file'
1015 '$$ Discard remaining changes to this &file'
1014 '$$ Discard remaining changes to this &file'
1016 '$$ &Done, skip remaining changes and files'
1015 '$$ &Done, skip remaining changes and files'
1017 '$$ Discard &all changes to all remaining files'
1016 '$$ Discard &all changes to all remaining files'
1018 '$$ &Quit, discarding no changes'
1017 '$$ &Quit, discarding no changes'
1019 '$$ &? (display help)'),
1018 '$$ &? (display help)'),
1020 'record': _('[Ynesfdaq?]'
1019 'record': _('[Ynesfdaq?]'
1021 '$$ &Yes, record this change'
1020 '$$ &Yes, record this change'
1022 '$$ &No, skip this change'
1021 '$$ &No, skip this change'
1023 '$$ &Edit this change manually'
1022 '$$ &Edit this change manually'
1024 '$$ &Skip remaining changes to this file'
1023 '$$ &Skip remaining changes to this file'
1025 '$$ Record remaining changes to this &file'
1024 '$$ Record remaining changes to this &file'
1026 '$$ &Done, skip remaining changes and files'
1025 '$$ &Done, skip remaining changes and files'
1027 '$$ Record &all changes to all remaining files'
1026 '$$ Record &all changes to all remaining files'
1028 '$$ &Quit, recording no changes'
1027 '$$ &Quit, recording no changes'
1029 '$$ &? (display help)'),
1028 '$$ &? (display help)'),
1030 'revert': _('[Ynesfdaq?]'
1029 'revert': _('[Ynesfdaq?]'
1031 '$$ &Yes, revert this change'
1030 '$$ &Yes, revert this change'
1032 '$$ &No, skip this change'
1031 '$$ &No, skip this change'
1033 '$$ &Edit this change manually'
1032 '$$ &Edit this change manually'
1034 '$$ &Skip remaining changes to this file'
1033 '$$ &Skip remaining changes to this file'
1035 '$$ Revert remaining changes to this &file'
1034 '$$ Revert remaining changes to this &file'
1036 '$$ &Done, skip remaining changes and files'
1035 '$$ &Done, skip remaining changes and files'
1037 '$$ Revert &all changes to all remaining files'
1036 '$$ Revert &all changes to all remaining files'
1038 '$$ &Quit, reverting no changes'
1037 '$$ &Quit, reverting no changes'
1039 '$$ &? (display help)')
1038 '$$ &? (display help)')
1040 }
1039 }
1041 }
1040 }
1042
1041
1043 def filterpatch(ui, headers, operation=None):
1042 def filterpatch(ui, headers, operation=None):
1044 """Interactively filter patch chunks into applied-only chunks"""
1043 """Interactively filter patch chunks into applied-only chunks"""
1045 if operation is None:
1044 if operation is None:
1046 operation = 'record'
1045 operation = 'record'
1047
1046
1048 def prompt(skipfile, skipall, query, chunk):
1047 def prompt(skipfile, skipall, query, chunk):
1049 """prompt query, and process base inputs
1048 """prompt query, and process base inputs
1050
1049
1051 - y/n for the rest of file
1050 - y/n for the rest of file
1052 - y/n for the rest
1051 - y/n for the rest
1053 - ? (help)
1052 - ? (help)
1054 - q (quit)
1053 - q (quit)
1055
1054
1056 Return True/False and possibly updated skipfile and skipall.
1055 Return True/False and possibly updated skipfile and skipall.
1057 """
1056 """
1058 newpatches = None
1057 newpatches = None
1059 if skipall is not None:
1058 if skipall is not None:
1060 return skipall, skipfile, skipall, newpatches
1059 return skipall, skipfile, skipall, newpatches
1061 if skipfile is not None:
1060 if skipfile is not None:
1062 return skipfile, skipfile, skipall, newpatches
1061 return skipfile, skipfile, skipall, newpatches
1063 while True:
1062 while True:
1064 resps = messages['help'][operation]
1063 resps = messages['help'][operation]
1065 r = ui.promptchoice("%s %s" % (query, resps))
1064 r = ui.promptchoice("%s %s" % (query, resps))
1066 ui.write("\n")
1065 ui.write("\n")
1067 if r == 8: # ?
1066 if r == 8: # ?
1068 for c, t in ui.extractchoices(resps)[1]:
1067 for c, t in ui.extractchoices(resps)[1]:
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1068 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1070 continue
1069 continue
1071 elif r == 0: # yes
1070 elif r == 0: # yes
1072 ret = True
1071 ret = True
1073 elif r == 1: # no
1072 elif r == 1: # no
1074 ret = False
1073 ret = False
1075 elif r == 2: # Edit patch
1074 elif r == 2: # Edit patch
1076 if chunk is None:
1075 if chunk is None:
1077 ui.write(_('cannot edit patch for whole file'))
1076 ui.write(_('cannot edit patch for whole file'))
1078 ui.write("\n")
1077 ui.write("\n")
1079 continue
1078 continue
1080 if chunk.header.binary():
1079 if chunk.header.binary():
1081 ui.write(_('cannot edit patch for binary file'))
1080 ui.write(_('cannot edit patch for binary file'))
1082 ui.write("\n")
1081 ui.write("\n")
1083 continue
1082 continue
1084 # Patch comment based on the Git one (based on comment at end of
1083 # Patch comment based on the Git one (based on comment at end of
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1084 # https://mercurial-scm.org/wiki/RecordExtension)
1086 phelp = '---' + _("""
1085 phelp = '---' + _("""
1087 To remove '-' lines, make them ' ' lines (context).
1086 To remove '-' lines, make them ' ' lines (context).
1088 To remove '+' lines, delete them.
1087 To remove '+' lines, delete them.
1089 Lines starting with # will be removed from the patch.
1088 Lines starting with # will be removed from the patch.
1090
1089
1091 If the patch applies cleanly, the edited hunk will immediately be
1090 If the patch applies cleanly, the edited hunk will immediately be
1092 added to the record list. If it does not apply cleanly, a rejects
1091 added to the record list. If it does not apply cleanly, a rejects
1093 file will be generated: you can use that when you try again. If
1092 file will be generated: you can use that when you try again. If
1094 all lines of the hunk are removed, then the edit is aborted and
1093 all lines of the hunk are removed, then the edit is aborted and
1095 the hunk is left unchanged.
1094 the hunk is left unchanged.
1096 """)
1095 """)
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1096 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1098 suffix=".diff", text=True)
1097 suffix=".diff", text=True)
1099 ncpatchfp = None
1098 ncpatchfp = None
1100 try:
1099 try:
1101 # Write the initial patch
1100 # Write the initial patch
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1101 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1103 chunk.header.write(f)
1102 chunk.header.write(f)
1104 chunk.write(f)
1103 chunk.write(f)
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1104 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1106 f.close()
1105 f.close()
1107 # Start the editor and wait for it to complete
1106 # Start the editor and wait for it to complete
1108 editor = ui.geteditor()
1107 editor = ui.geteditor()
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1108 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1110 environ={'HGUSER': ui.username()},
1109 environ={'HGUSER': ui.username()},
1111 blockedtag='filterpatch')
1110 blockedtag='filterpatch')
1112 if ret != 0:
1111 if ret != 0:
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1112 ui.warn(_("editor exited with exit code %d\n") % ret)
1114 continue
1113 continue
1115 # Remove comment lines
1114 # Remove comment lines
1116 patchfp = open(patchfn)
1115 patchfp = open(patchfn)
1117 ncpatchfp = stringio()
1116 ncpatchfp = stringio()
1118 for line in util.iterfile(patchfp):
1117 for line in util.iterfile(patchfp):
1119 if not line.startswith('#'):
1118 if not line.startswith('#'):
1120 ncpatchfp.write(line)
1119 ncpatchfp.write(line)
1121 patchfp.close()
1120 patchfp.close()
1122 ncpatchfp.seek(0)
1121 ncpatchfp.seek(0)
1123 newpatches = parsepatch(ncpatchfp)
1122 newpatches = parsepatch(ncpatchfp)
1124 finally:
1123 finally:
1125 os.unlink(patchfn)
1124 os.unlink(patchfn)
1126 del ncpatchfp
1125 del ncpatchfp
1127 # Signal that the chunk shouldn't be applied as-is, but
1126 # Signal that the chunk shouldn't be applied as-is, but
1128 # provide the new patch to be used instead.
1127 # provide the new patch to be used instead.
1129 ret = False
1128 ret = False
1130 elif r == 3: # Skip
1129 elif r == 3: # Skip
1131 ret = skipfile = False
1130 ret = skipfile = False
1132 elif r == 4: # file (Record remaining)
1131 elif r == 4: # file (Record remaining)
1133 ret = skipfile = True
1132 ret = skipfile = True
1134 elif r == 5: # done, skip remaining
1133 elif r == 5: # done, skip remaining
1135 ret = skipall = False
1134 ret = skipall = False
1136 elif r == 6: # all
1135 elif r == 6: # all
1137 ret = skipall = True
1136 ret = skipall = True
1138 elif r == 7: # quit
1137 elif r == 7: # quit
1139 raise error.Abort(_('user quit'))
1138 raise error.Abort(_('user quit'))
1140 return ret, skipfile, skipall, newpatches
1139 return ret, skipfile, skipall, newpatches
1141
1140
1142 seen = set()
1141 seen = set()
1143 applied = {} # 'filename' -> [] of chunks
1142 applied = {} # 'filename' -> [] of chunks
1144 skipfile, skipall = None, None
1143 skipfile, skipall = None, None
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1144 pos, total = 1, sum(len(h.hunks) for h in headers)
1146 for h in headers:
1145 for h in headers:
1147 pos += len(h.hunks)
1146 pos += len(h.hunks)
1148 skipfile = None
1147 skipfile = None
1149 fixoffset = 0
1148 fixoffset = 0
1150 hdr = ''.join(h.header)
1149 hdr = ''.join(h.header)
1151 if hdr in seen:
1150 if hdr in seen:
1152 continue
1151 continue
1153 seen.add(hdr)
1152 seen.add(hdr)
1154 if skipall is None:
1153 if skipall is None:
1155 h.pretty(ui)
1154 h.pretty(ui)
1156 msg = (_('examine changes to %s?') %
1155 msg = (_('examine changes to %s?') %
1157 _(' and ').join("'%s'" % f for f in h.files()))
1156 _(' and ').join("'%s'" % f for f in h.files()))
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1157 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1159 if not r:
1158 if not r:
1160 continue
1159 continue
1161 applied[h.filename()] = [h]
1160 applied[h.filename()] = [h]
1162 if h.allhunks():
1161 if h.allhunks():
1163 applied[h.filename()] += h.hunks
1162 applied[h.filename()] += h.hunks
1164 continue
1163 continue
1165 for i, chunk in enumerate(h.hunks):
1164 for i, chunk in enumerate(h.hunks):
1166 if skipfile is None and skipall is None:
1165 if skipfile is None and skipall is None:
1167 chunk.pretty(ui)
1166 chunk.pretty(ui)
1168 if total == 1:
1167 if total == 1:
1169 msg = messages['single'][operation] % chunk.filename()
1168 msg = messages['single'][operation] % chunk.filename()
1170 else:
1169 else:
1171 idx = pos - len(h.hunks) + i
1170 idx = pos - len(h.hunks) + i
1172 msg = messages['multiple'][operation] % (idx, total,
1171 msg = messages['multiple'][operation] % (idx, total,
1173 chunk.filename())
1172 chunk.filename())
1174 r, skipfile, skipall, newpatches = prompt(skipfile,
1173 r, skipfile, skipall, newpatches = prompt(skipfile,
1175 skipall, msg, chunk)
1174 skipall, msg, chunk)
1176 if r:
1175 if r:
1177 if fixoffset:
1176 if fixoffset:
1178 chunk = copy.copy(chunk)
1177 chunk = copy.copy(chunk)
1179 chunk.toline += fixoffset
1178 chunk.toline += fixoffset
1180 applied[chunk.filename()].append(chunk)
1179 applied[chunk.filename()].append(chunk)
1181 elif newpatches is not None:
1180 elif newpatches is not None:
1182 for newpatch in newpatches:
1181 for newpatch in newpatches:
1183 for newhunk in newpatch.hunks:
1182 for newhunk in newpatch.hunks:
1184 if fixoffset:
1183 if fixoffset:
1185 newhunk.toline += fixoffset
1184 newhunk.toline += fixoffset
1186 applied[newhunk.filename()].append(newhunk)
1185 applied[newhunk.filename()].append(newhunk)
1187 else:
1186 else:
1188 fixoffset += chunk.removed - chunk.added
1187 fixoffset += chunk.removed - chunk.added
1189 return (sum([h for h in applied.itervalues()
1188 return (sum([h for h in applied.itervalues()
1190 if h[0].special() or len(h) > 1], []), {})
1189 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single hunk parsed from a unified or context diff.

    self.hunk holds the raw hunk lines (starting with the descriptor line),
    self.a the old-side lines (prefixed with '-' or ' ') and self.b the
    new-side lines.  starta/lena and startb/lenb are the ranges from the
    hunk descriptor.
    """
    def __init__(self, desc, num, lr, context):
        # desc: the '@@ ...' (unified) or '*** ...' (context) descriptor line
        # num: 1-based hunk number, used in error messages
        # lr: line reader to pull the hunk body from; None builds an empty
        #     shell (see getnormalized)
        # context: true to parse context-diff format, false for unified
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body following the '@@' descriptor.

        Raises PatchError if the descriptor does not match the module-level
        unidesc regex.  Line collection is delegated to diffhelpers.addlines.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -start +start @@' means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body and rewrite it in unified form.

        Reads the old block ('*** start,end ****') then the new block
        ('--- start,end ----'), merging them into self.hunk with
        unified-style '-'/'+'/' ' prefixes, and finally replaces the
        descriptor with an equivalent '@@' line.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline we
            # stored for the previous old-side line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge the new-side line into self.hunk at the right spot,
            # skipping over removal lines already recorded from the old block
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing '\ No newline at end of file' marker if present;
        # otherwise push the peeked line back onto the reader.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold exactly the declared number of lines."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to `fuzz` context
        lines trimmed from each end (only the top if `toponly`)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1407
1406
class binhunk(object):
    """A git-style binary patch hunk ('GIT binary patch').

    After construction, self.text holds the decoded payload: either the
    literal new file contents, or (when self.delta is True) a binary delta
    to be applied against the old contents via applybindelta.
    """
    def __init__(self, lr, fname):
        # lr: line reader positioned just after the 'GIT binary patch' line
        # fname: file name, used only for error messages
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # the hunk is complete once _read() decoded the payload
        return self.text is not None

    def new(self, lines):
        """Return the new file contents as a single-element list.

        `lines` is the old contents, needed only to apply a delta payload.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode the base85/zlib payload following the binary header.

        Raises PatchError on a missing header, undecodable base85 data, or
        a size mismatch against the declared length.
        """
        def getline(lr, hunk):
            # read one line, keep the raw copy in `hunk`, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # the first character encodes the decoded byte count of the
            # line: 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1463
1462
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' patch header line.

    The four-character prefix and the line terminator are dropped; the
    name ends at the first tab or, failing that, the first space.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1473
1472
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''
    # Headers pass through unchanged; anything exposing reversehunk()
    # (i.e. actual hunks) is replaced by its reversed counterpart.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1536
1535
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the line ranges from an '@@ ... @@' descriptor
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # a context run closes the pending hunk (if any) and becomes
            # the leading context of the next one
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the context accumulated so far precedes this hunk's changes
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush the current file before starting a new header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> event -> handler; handlers are plain functions here, so
        # the driver below passes the parser instance explicitly
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    # drive the state machine with the (event, payload) stream from scanpatch
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1660
1659
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    # Nothing to strip: the whole path (minus trailing whitespace) survives.
    if strip == 0:
        return '', prefix + path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1698
1697
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) hunk.

    Picks which of the a/ or b/ header paths should actually be patched,
    and tags the result as an ADD or DELETE when the hunk creates or
    removes the file entirely.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = afile if isbackup else bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = afile if isbackup else bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1753
1752
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def gather(first, keep):
        """accumulate lines from lr while keep(line) holds"""
        collected = [first]
        for ln in iter(lr.readline, ''):
            if keep(ln):
                collected.append(ln)
            else:
                # first non-matching line belongs to the next event
                lr.push(ln)
                break
        return collected

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(l):
                s = l.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = gather(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # unchanged context lines ('\\ No newline...' rides along)
            yield 'context', gather(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', gather(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1799
1798
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember where we are so we can rewind after
        # the metadata scan
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): slurp it into memory instead
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch body from the start
    fp.seek(pos)
    return gitpatches
1825
1824
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know whether this is a context diff
    context = None
    lr = linereader(fp)

    for line in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and line[0] == '@')
            or (context is not False and line.startswith('***************'))
            or line.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if line.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and line.startswith('***************'):
                    context = True
                h = hunk(line, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif line.startswith('diff --git a/'):
            m = gitre.match(line.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, line)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None,
                               gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif line.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(line)
            bfile = parsefilename(l2)
        elif line.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(line)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit the remaining hunk-less git metadata entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1921
1920
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def _varintlen(chunk):
        # size headers are variable-length: high bit set means more bytes
        consumed = 0
        for ch in chunk:
            consumed += 1
            if not (ord(ch) & 0x80):
                break
        return consumed
    # skip the source-size and result-size headers
    binchunk = binchunk[_varintlen(binchunk):]
    binchunk = binchunk[_varintlen(binchunk):]
    pieces = []
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if cmd & 0x80:
            # copy-from-source opcode: each set low bit selects an
            # additional offset/size byte that follows
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[i]) << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[i]) << shift
                    i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif cmd != 0:
            # literal insert: the next cmd bytes come straight from the
            # delta stream
            pieces.append(binchunk[i:i + cmd])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1977
1976
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic driver, using the default patchfile factory
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1990
1989
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive the application of the events produced by iterhunks(fp).

    'patcher' is a patchfile-like factory used for hunk application,
    'backend' receives the patched results and 'store' caches file
    contents for git copies/renames.  Returns 0 for a clean application,
    1 if any hunk applied with fuzz, -1 if any hunk was rejected.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip-1 because the leading a/ or b/ is already accounted for
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (mode flip, copy, rename, delete)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                # A missing file is deliberately ignored here: it will
                # trigger a getfile() error in a place more appropriate
                # for error handling, and will not interrupt the
                # patching process.
                if data is not None:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2080
2079
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""
    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external program's output for patched files, fuzz
        # warnings and failures
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif 'with fuzz' in line:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif 'saving rejects to file' in line:
                ui.warn(line + '\n')
            elif 'FAILED' in line:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2122
2121
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' (a path or a file object) through 'backend'.

    Raises PatchError when the patch does not apply; otherwise returns
    True when any hunk needed fuzz, False for a clean application.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    # accept either a filename or an already-open file object
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close what we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2149
2148
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    wc = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wc, patchobj, strip, prefix, files, eolmode)
2156
2155
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of changectx 'ctx', writing into 'store'."""
    rb = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rb, patchobj, strip, prefix, files, eolmode)
2161
2160
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external patch program takes precedence over the
    # builtin patcher
    patcher = ui.config('ui', 'patch')
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2183
2182
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at
    'patchpath' (including rename sources)."""
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        touched = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                touched.add(gp.path)
                if gp.op == 'RENAME':
                    touched.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return touched
2204
2203
class GitDiffRequired(Exception):
    """Raised when git diff format is required to express a change."""
2207
2206
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# backwards-compatible alias for the historical name
diffopts = diffallopts
2214
2213
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Resolve one option: command-line value wins over config, and
        # ui.plain() may force a fixed value for scripting stability.
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2295
2294
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    # Flatten the structured (header, hunks) stream from diffhunks()
    # back into plain text chunks.
    for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
                                   changes=changes, opts=opts,
                                   losedatafn=losedatafn, prefix=prefix,
                                   relroot=relroot, copy=copy):
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # A lone one-line header with no hunk text is suppressed entirely.
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2329
2328
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # Default: compare the working directory against its first parent.
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # Small LRU cache of filelogs keyed by filename, so repeated
        # lookups of the same file do not re-open its filelog.
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as
            # removed. They are not in ctx1, so we don't want to show them
            # in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as
            # additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2442
2441
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # Prefixes recognized inside a diff header block...
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # ...and inside hunk text.
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # Re-emit the newline that split() consumed.
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # Split on tabs so each tab run gets its own label.
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # No prefix matched: emit the line unlabeled.
                yield (line, '')
            if line != stripline:
                # Whatever rstrip() removed is trailing whitespace.
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2496
2495
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
2500
2499
2501 def _filepairs(modified, added, removed, copy, opts):
2500 def _filepairs(modified, added, removed, copy, opts):
2502 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2501 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2503 before and f2 is the the name after. For added files, f1 will be None,
2502 before and f2 is the the name after. For added files, f1 will be None,
2504 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2503 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2505 or 'rename' (the latter two only if opts.git is set).'''
2504 or 'rename' (the latter two only if opts.git is set).'''
2506 gone = set()
2505 gone = set()
2507
2506
2508 copyto = dict([(v, k) for k, v in copy.items()])
2507 copyto = dict([(v, k) for k, v in copy.items()])
2509
2508
2510 addedset, removedset = set(added), set(removed)
2509 addedset, removedset = set(added), set(removed)
2511
2510
2512 for f in sorted(modified + added + removed):
2511 for f in sorted(modified + added + removed):
2513 copyop = None
2512 copyop = None
2514 f1, f2 = f, f
2513 f1, f2 = f, f
2515 if f in addedset:
2514 if f in addedset:
2516 f1 = None
2515 f1 = None
2517 if f in copy:
2516 if f in copy:
2518 if opts.git:
2517 if opts.git:
2519 f1 = copy[f]
2518 f1 = copy[f]
2520 if f1 in removedset and f1 not in gone:
2519 if f1 in removedset and f1 not in gone:
2521 copyop = 'rename'
2520 copyop = 'rename'
2522 gone.add(f1)
2521 gone.add(f1)
2523 else:
2522 else:
2524 copyop = 'copy'
2523 copyop = 'copy'
2525 elif f in removedset:
2524 elif f in removedset:
2526 f2 = None
2525 f2 = None
2527 if opts.git:
2526 if opts.git:
2528 # have we already reported a copy above?
2527 # have we already reported a copy above?
2529 if (f in copyto and copyto[f] in addedset
2528 if (f in copyto and copyto[f] in addedset
2530 and copy[copyto[f]] == f):
2529 and copy[copyto[f]] == f):
2531 continue
2530 continue
2532 yield f1, f2, copyop
2531 yield f1, f2, copyop
2533
2532
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-style blob hash: sha1 of "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2]
                                       if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        # fctx.is | diffopts                | what to   | is fctx.data()
        # binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        # yes      | no   no       no  *     | summary   | no
        # yes      | no   no       yes *     | base85    | yes
        # yes      | no   yes      no  *     | summary   | no
        # yes      | no   yes      yes 0     | summary   | no
        # yes      | no   yes      yes >0    | summary   | semi [1]
        # yes      | yes  *        *   *     | text diff | yes
        # no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2689
2688
def diffstatsum(stats):
    '''Fold per-file diffstat tuples into file-level totals.

    *stats* is an iterable of (filename, adds, removes, isbinary) tuples as
    produced by diffstatdata(). Returns a 5-tuple
    (maxfile, maxtotal, addtotal, removetotal, binary) where maxfile is the
    widest filename (in display columns), maxtotal the largest per-file
    change count, addtotal/removetotal the grand totals, and binary is True
    if any entry was a binary file.
    '''
    maxname = 0
    maxchanges = 0
    adds = removes = 0
    anybinary = False
    for name, added, removed, isbinary in stats:
        # display width, not len(): filenames may hold wide characters
        width = encoding.colwidth(name)
        if width > maxname:
            maxname = width
        changed = added + removed
        if changed > maxchanges:
            maxchanges = changed
        adds += added
        removes += removed
        if isbinary:
            anybinary = True

    return maxname, maxchanges, adds, removes, anybinary
2700
2699
def diffstatdata(lines):
    '''Parse unified diff text into per-file statistics.

    *lines* is an iterable of diff lines. Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file touched by
    the diff, in order of appearance.
    '''
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename = None
    adds = removes = 0
    isbinary = False

    def flush():
        # record the file collected so far, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader tracks whether we are still in the per-file header portion
    # of the diff (between a 'diff' line and the first '@@' hunk marker),
    # so that '---'/'+++' lines there are not miscounted as changes.
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            flush()
            # starting a new file diff: reset counters and header state
            inheader = True
            adds = removes = 0
            isbinary = False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif inheader:
            if (line.startswith('GIT binary patch') or
                line.startswith('Binary file')):
                isbinary = True
        elif line.startswith('+'):
            adds += 1
        elif line.startswith('-'):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    flush()
    return results
2739
2738
def diffstat(lines, width=80):
    '''Render a textual diffstat summary of the diff in *lines*.

    Produces one row per file (name, change count, +/- histogram) followed
    by a totals line, formatted to at most *width* columns, and returns it
    as a single string.
    '''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must be wide enough for the largest number, or for
    # the literal 'Bin' used for binary files
    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    # 6 columns are consumed by fixed separators/padding around the graph;
    # never shrink the histogram below 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        # pad by display width so wide characters line up
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2777
2776
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not (line and line[-1] in '+-'):
            # header/summary lines carry no color label
            yield (line, '')
            yield ('\n', '')
            continue
        # split the +/- histogram off the end so it can be labeled
        name, graph = line.rsplit(' ', 1)
        yield (name + ' ', '')
        inserted = re.search(br'\++', graph)
        if inserted:
            yield (inserted.group(0), 'diffstat.inserted')
        deleted = re.search(br'-+', graph)
        if deleted:
            yield (deleted.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now