record: fix revert -i for lines without newline (issue5651)...
Jun Wu
r33941:5707bfe0 stable
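The one-line change in this revision adds a '\\' entry to the sign-swap map in recordhunk.reversehunk() (file line 975 below) so that "\ No newline at end of file" marker lines are passed through unchanged when a hunk is reversed, instead of hitting a missing dict key during `hg revert -i`. A minimal standalone sketch of that mapping follows; the helper name and sample hunk are made up for illustration and are not Mercurial's API:

    # Swap '+' and '-' prefixes; pass '\' marker lines through unchanged.
    def reverse_hunk_lines(hunk):
        m = {'+': '-', '-': '+', '\\': '\\'}   # the '\\' entry is the fix
        return ['%s%s' % (m[l[0]], l[1:]) for l in hunk]

    hunk = [
        '-old last line\n',
        '+new last line\n',
        '\\ No newline at end of file\n',
    ]
    print(reverse_hunk_lines(hunk))
    # Without the '\\' key, the marker line raised KeyError (issue5651).
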
@@ -1,2793 +1,2793 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
43 diffhelpers = policy.importmod(r'diffhelpers')
43 diffhelpers = policy.importmod(r'diffhelpers')
44 stringio = util.stringio
44 stringio = util.stringio
45
45
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
49 class PatchError(Exception):
49 class PatchError(Exception):
50 pass
50 pass
51
51
52
52
53 # public functions
53 # public functions
54
54
55 def split(stream):
55 def split(stream):
56 '''return an iterator of individual patches from a stream'''
56 '''return an iterator of individual patches from a stream'''
57 def isheader(line, inheader):
57 def isheader(line, inheader):
58 if inheader and line[0] in (' ', '\t'):
58 if inheader and line[0] in (' ', '\t'):
59 # continuation
59 # continuation
60 return True
60 return True
61 if line[0] in (' ', '-', '+'):
61 if line[0] in (' ', '-', '+'):
62 # diff line - don't check for header pattern in there
62 # diff line - don't check for header pattern in there
63 return False
63 return False
64 l = line.split(': ', 1)
64 l = line.split(': ', 1)
65 return len(l) == 2 and ' ' not in l[0]
65 return len(l) == 2 and ' ' not in l[0]
66
66
67 def chunk(lines):
67 def chunk(lines):
68 return stringio(''.join(lines))
68 return stringio(''.join(lines))
69
69
70 def hgsplit(stream, cur):
70 def hgsplit(stream, cur):
71 inheader = True
71 inheader = True
72
72
73 for line in stream:
73 for line in stream:
74 if not line.strip():
74 if not line.strip():
75 inheader = False
75 inheader = False
76 if not inheader and line.startswith('# HG changeset patch'):
76 if not inheader and line.startswith('# HG changeset patch'):
77 yield chunk(cur)
77 yield chunk(cur)
78 cur = []
78 cur = []
79 inheader = True
79 inheader = True
80
80
81 cur.append(line)
81 cur.append(line)
82
82
83 if cur:
83 if cur:
84 yield chunk(cur)
84 yield chunk(cur)
85
85
86 def mboxsplit(stream, cur):
86 def mboxsplit(stream, cur):
87 for line in stream:
87 for line in stream:
88 if line.startswith('From '):
88 if line.startswith('From '):
89 for c in split(chunk(cur[1:])):
89 for c in split(chunk(cur[1:])):
90 yield c
90 yield c
91 cur = []
91 cur = []
92
92
93 cur.append(line)
93 cur.append(line)
94
94
95 if cur:
95 if cur:
96 for c in split(chunk(cur[1:])):
96 for c in split(chunk(cur[1:])):
97 yield c
97 yield c
98
98
99 def mimesplit(stream, cur):
99 def mimesplit(stream, cur):
100 def msgfp(m):
100 def msgfp(m):
101 fp = stringio()
101 fp = stringio()
102 g = email.Generator.Generator(fp, mangle_from_=False)
102 g = email.Generator.Generator(fp, mangle_from_=False)
103 g.flatten(m)
103 g.flatten(m)
104 fp.seek(0)
104 fp.seek(0)
105 return fp
105 return fp
106
106
107 for line in stream:
107 for line in stream:
108 cur.append(line)
108 cur.append(line)
109 c = chunk(cur)
109 c = chunk(cur)
110
110
111 m = email.Parser.Parser().parse(c)
111 m = email.Parser.Parser().parse(c)
112 if not m.is_multipart():
112 if not m.is_multipart():
113 yield msgfp(m)
113 yield msgfp(m)
114 else:
114 else:
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
116 for part in m.walk():
116 for part in m.walk():
117 ct = part.get_content_type()
117 ct = part.get_content_type()
118 if ct not in ok_types:
118 if ct not in ok_types:
119 continue
119 continue
120 yield msgfp(part)
120 yield msgfp(part)
121
121
122 def headersplit(stream, cur):
122 def headersplit(stream, cur):
123 inheader = False
123 inheader = False
124
124
125 for line in stream:
125 for line in stream:
126 if not inheader and isheader(line, inheader):
126 if not inheader and isheader(line, inheader):
127 yield chunk(cur)
127 yield chunk(cur)
128 cur = []
128 cur = []
129 inheader = True
129 inheader = True
130 if inheader and not isheader(line, inheader):
130 if inheader and not isheader(line, inheader):
131 inheader = False
131 inheader = False
132
132
133 cur.append(line)
133 cur.append(line)
134
134
135 if cur:
135 if cur:
136 yield chunk(cur)
136 yield chunk(cur)
137
137
138 def remainder(cur):
138 def remainder(cur):
139 yield chunk(cur)
139 yield chunk(cur)
140
140
141 class fiter(object):
141 class fiter(object):
142 def __init__(self, fp):
142 def __init__(self, fp):
143 self.fp = fp
143 self.fp = fp
144
144
145 def __iter__(self):
145 def __iter__(self):
146 return self
146 return self
147
147
148 def next(self):
148 def next(self):
149 l = self.fp.readline()
149 l = self.fp.readline()
150 if not l:
150 if not l:
151 raise StopIteration
151 raise StopIteration
152 return l
152 return l
153
153
154 inheader = False
154 inheader = False
155 cur = []
155 cur = []
156
156
157 mimeheaders = ['content-type']
157 mimeheaders = ['content-type']
158
158
159 if not util.safehasattr(stream, 'next'):
159 if not util.safehasattr(stream, 'next'):
160 # http responses, for example, have readline but not next
160 # http responses, for example, have readline but not next
161 stream = fiter(stream)
161 stream = fiter(stream)
162
162
163 for line in stream:
163 for line in stream:
164 cur.append(line)
164 cur.append(line)
165 if line.startswith('# HG changeset patch'):
165 if line.startswith('# HG changeset patch'):
166 return hgsplit(stream, cur)
166 return hgsplit(stream, cur)
167 elif line.startswith('From '):
167 elif line.startswith('From '):
168 return mboxsplit(stream, cur)
168 return mboxsplit(stream, cur)
169 elif isheader(line, inheader):
169 elif isheader(line, inheader):
170 inheader = True
170 inheader = True
171 if line.split(':', 1)[0].lower() in mimeheaders:
171 if line.split(':', 1)[0].lower() in mimeheaders:
172 # let email parser handle this
172 # let email parser handle this
173 return mimesplit(stream, cur)
173 return mimesplit(stream, cur)
174 elif line.startswith('--- ') and inheader:
174 elif line.startswith('--- ') and inheader:
175 # No evil headers seen by diff start, split by hand
175 # No evil headers seen by diff start, split by hand
176 return headersplit(stream, cur)
176 return headersplit(stream, cur)
177 # Not enough info, keep reading
177 # Not enough info, keep reading
178
178
179 # if we are here, we have a very plain patch
179 # if we are here, we have a very plain patch
180 return remainder(cur)
180 return remainder(cur)
181
181
182 ## Some facility for extensible patch parsing:
182 ## Some facility for extensible patch parsing:
183 # list of pairs ("header to match", "data key")
183 # list of pairs ("header to match", "data key")
184 patchheadermap = [('Date', 'date'),
184 patchheadermap = [('Date', 'date'),
185 ('Branch', 'branch'),
185 ('Branch', 'branch'),
186 ('Node ID', 'nodeid'),
186 ('Node ID', 'nodeid'),
187 ]
187 ]
188
188
189 def extract(ui, fileobj):
189 def extract(ui, fileobj):
190 '''extract patch from data read from fileobj.
190 '''extract patch from data read from fileobj.
191
191
192 patch can be a normal patch or contained in an email message.
192 patch can be a normal patch or contained in an email message.
193
193
194 return a dictionary. Standard keys are:
194 return a dictionary. Standard keys are:
195 - filename,
195 - filename,
196 - message,
196 - message,
197 - user,
197 - user,
198 - date,
198 - date,
199 - branch,
199 - branch,
200 - node,
200 - node,
201 - p1,
201 - p1,
202 - p2.
202 - p2.
203 Any item can be missing from the dictionary. If filename is missing,
203 Any item can be missing from the dictionary. If filename is missing,
204 fileobj did not contain a patch. Caller must unlink filename when done.'''
204 fileobj did not contain a patch. Caller must unlink filename when done.'''
205
205
206 # attempt to detect the start of a patch
206 # attempt to detect the start of a patch
207 # (this heuristic is borrowed from quilt)
207 # (this heuristic is borrowed from quilt)
208 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
208 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
209 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
209 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
210 r'---[ \t].*?^\+\+\+[ \t]|'
210 r'---[ \t].*?^\+\+\+[ \t]|'
211 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
211 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
212
212
213 data = {}
213 data = {}
214 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
214 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
215 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
215 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
216 try:
216 try:
217 msg = email.Parser.Parser().parse(fileobj)
217 msg = email.Parser.Parser().parse(fileobj)
218
218
219 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
219 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
220 data['user'] = msg['From'] and mail.headdecode(msg['From'])
220 data['user'] = msg['From'] and mail.headdecode(msg['From'])
221 if not subject and not data['user']:
221 if not subject and not data['user']:
222 # Not an email, restore parsed headers if any
222 # Not an email, restore parsed headers if any
223 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
223 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
224
224
225 # should try to parse msg['Date']
225 # should try to parse msg['Date']
226 parents = []
226 parents = []
227
227
228 if subject:
228 if subject:
229 if subject.startswith('[PATCH'):
229 if subject.startswith('[PATCH'):
230 pend = subject.find(']')
230 pend = subject.find(']')
231 if pend >= 0:
231 if pend >= 0:
232 subject = subject[pend + 1:].lstrip()
232 subject = subject[pend + 1:].lstrip()
233 subject = re.sub(r'\n[ \t]+', ' ', subject)
233 subject = re.sub(r'\n[ \t]+', ' ', subject)
234 ui.debug('Subject: %s\n' % subject)
234 ui.debug('Subject: %s\n' % subject)
235 if data['user']:
235 if data['user']:
236 ui.debug('From: %s\n' % data['user'])
236 ui.debug('From: %s\n' % data['user'])
237 diffs_seen = 0
237 diffs_seen = 0
238 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
238 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
239 message = ''
239 message = ''
240 for part in msg.walk():
240 for part in msg.walk():
241 content_type = part.get_content_type()
241 content_type = part.get_content_type()
242 ui.debug('Content-Type: %s\n' % content_type)
242 ui.debug('Content-Type: %s\n' % content_type)
243 if content_type not in ok_types:
243 if content_type not in ok_types:
244 continue
244 continue
245 payload = part.get_payload(decode=True)
245 payload = part.get_payload(decode=True)
246 m = diffre.search(payload)
246 m = diffre.search(payload)
247 if m:
247 if m:
248 hgpatch = False
248 hgpatch = False
249 hgpatchheader = False
249 hgpatchheader = False
250 ignoretext = False
250 ignoretext = False
251
251
252 ui.debug('found patch at byte %d\n' % m.start(0))
252 ui.debug('found patch at byte %d\n' % m.start(0))
253 diffs_seen += 1
253 diffs_seen += 1
254 cfp = stringio()
254 cfp = stringio()
255 for line in payload[:m.start(0)].splitlines():
255 for line in payload[:m.start(0)].splitlines():
256 if line.startswith('# HG changeset patch') and not hgpatch:
256 if line.startswith('# HG changeset patch') and not hgpatch:
257 ui.debug('patch generated by hg export\n')
257 ui.debug('patch generated by hg export\n')
258 hgpatch = True
258 hgpatch = True
259 hgpatchheader = True
259 hgpatchheader = True
260 # drop earlier commit message content
260 # drop earlier commit message content
261 cfp.seek(0)
261 cfp.seek(0)
262 cfp.truncate()
262 cfp.truncate()
263 subject = None
263 subject = None
264 elif hgpatchheader:
264 elif hgpatchheader:
265 if line.startswith('# User '):
265 if line.startswith('# User '):
266 data['user'] = line[7:]
266 data['user'] = line[7:]
267 ui.debug('From: %s\n' % data['user'])
267 ui.debug('From: %s\n' % data['user'])
268 elif line.startswith("# Parent "):
268 elif line.startswith("# Parent "):
269 parents.append(line[9:].lstrip())
269 parents.append(line[9:].lstrip())
270 elif line.startswith("# "):
270 elif line.startswith("# "):
271 for header, key in patchheadermap:
271 for header, key in patchheadermap:
272 prefix = '# %s ' % header
272 prefix = '# %s ' % header
273 if line.startswith(prefix):
273 if line.startswith(prefix):
274 data[key] = line[len(prefix):]
274 data[key] = line[len(prefix):]
275 else:
275 else:
276 hgpatchheader = False
276 hgpatchheader = False
277 elif line == '---':
277 elif line == '---':
278 ignoretext = True
278 ignoretext = True
279 if not hgpatchheader and not ignoretext:
279 if not hgpatchheader and not ignoretext:
280 cfp.write(line)
280 cfp.write(line)
281 cfp.write('\n')
281 cfp.write('\n')
282 message = cfp.getvalue()
282 message = cfp.getvalue()
283 if tmpfp:
283 if tmpfp:
284 tmpfp.write(payload)
284 tmpfp.write(payload)
285 if not payload.endswith('\n'):
285 if not payload.endswith('\n'):
286 tmpfp.write('\n')
286 tmpfp.write('\n')
287 elif not diffs_seen and message and content_type == 'text/plain':
287 elif not diffs_seen and message and content_type == 'text/plain':
288 message += '\n' + payload
288 message += '\n' + payload
289 except: # re-raises
289 except: # re-raises
290 tmpfp.close()
290 tmpfp.close()
291 os.unlink(tmpname)
291 os.unlink(tmpname)
292 raise
292 raise
293
293
294 if subject and not message.startswith(subject):
294 if subject and not message.startswith(subject):
295 message = '%s\n%s' % (subject, message)
295 message = '%s\n%s' % (subject, message)
296 data['message'] = message
296 data['message'] = message
297 tmpfp.close()
297 tmpfp.close()
298 if parents:
298 if parents:
299 data['p1'] = parents.pop(0)
299 data['p1'] = parents.pop(0)
300 if parents:
300 if parents:
301 data['p2'] = parents.pop(0)
301 data['p2'] = parents.pop(0)
302
302
303 if diffs_seen:
303 if diffs_seen:
304 data['filename'] = tmpname
304 data['filename'] = tmpname
305 else:
305 else:
306 os.unlink(tmpname)
306 os.unlink(tmpname)
307 return data
307 return data
308
308
309 class patchmeta(object):
309 class patchmeta(object):
310 """Patched file metadata
310 """Patched file metadata
311
311
312 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
312 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
313 or COPY. 'path' is patched file path. 'oldpath' is set to the
313 or COPY. 'path' is patched file path. 'oldpath' is set to the
314 origin file when 'op' is either COPY or RENAME, None otherwise. If
314 origin file when 'op' is either COPY or RENAME, None otherwise. If
315 file mode is changed, 'mode' is a tuple (islink, isexec) where
315 file mode is changed, 'mode' is a tuple (islink, isexec) where
316 'islink' is True if the file is a symlink and 'isexec' is True if
316 'islink' is True if the file is a symlink and 'isexec' is True if
317 the file is executable. Otherwise, 'mode' is None.
317 the file is executable. Otherwise, 'mode' is None.
318 """
318 """
319 def __init__(self, path):
319 def __init__(self, path):
320 self.path = path
320 self.path = path
321 self.oldpath = None
321 self.oldpath = None
322 self.mode = None
322 self.mode = None
323 self.op = 'MODIFY'
323 self.op = 'MODIFY'
324 self.binary = False
324 self.binary = False
325
325
326 def setmode(self, mode):
326 def setmode(self, mode):
327 islink = mode & 0o20000
327 islink = mode & 0o20000
328 isexec = mode & 0o100
328 isexec = mode & 0o100
329 self.mode = (islink, isexec)
329 self.mode = (islink, isexec)
330
330
331 def copy(self):
331 def copy(self):
332 other = patchmeta(self.path)
332 other = patchmeta(self.path)
333 other.oldpath = self.oldpath
333 other.oldpath = self.oldpath
334 other.mode = self.mode
334 other.mode = self.mode
335 other.op = self.op
335 other.op = self.op
336 other.binary = self.binary
336 other.binary = self.binary
337 return other
337 return other
338
338
339 def _ispatchinga(self, afile):
339 def _ispatchinga(self, afile):
340 if afile == '/dev/null':
340 if afile == '/dev/null':
341 return self.op == 'ADD'
341 return self.op == 'ADD'
342 return afile == 'a/' + (self.oldpath or self.path)
342 return afile == 'a/' + (self.oldpath or self.path)
343
343
344 def _ispatchingb(self, bfile):
344 def _ispatchingb(self, bfile):
345 if bfile == '/dev/null':
345 if bfile == '/dev/null':
346 return self.op == 'DELETE'
346 return self.op == 'DELETE'
347 return bfile == 'b/' + self.path
347 return bfile == 'b/' + self.path
348
348
349 def ispatching(self, afile, bfile):
349 def ispatching(self, afile, bfile):
350 return self._ispatchinga(afile) and self._ispatchingb(bfile)
350 return self._ispatchinga(afile) and self._ispatchingb(bfile)
351
351
352 def __repr__(self):
352 def __repr__(self):
353 return "<patchmeta %s %r>" % (self.op, self.path)
353 return "<patchmeta %s %r>" % (self.op, self.path)
354
354
355 def readgitpatch(lr):
355 def readgitpatch(lr):
356 """extract git-style metadata about patches from <patchname>"""
356 """extract git-style metadata about patches from <patchname>"""
357
357
358 # Filter patch for git information
358 # Filter patch for git information
359 gp = None
359 gp = None
360 gitpatches = []
360 gitpatches = []
361 for line in lr:
361 for line in lr:
362 line = line.rstrip(' \r\n')
362 line = line.rstrip(' \r\n')
363 if line.startswith('diff --git a/'):
363 if line.startswith('diff --git a/'):
364 m = gitre.match(line)
364 m = gitre.match(line)
365 if m:
365 if m:
366 if gp:
366 if gp:
367 gitpatches.append(gp)
367 gitpatches.append(gp)
368 dst = m.group(2)
368 dst = m.group(2)
369 gp = patchmeta(dst)
369 gp = patchmeta(dst)
370 elif gp:
370 elif gp:
371 if line.startswith('--- '):
371 if line.startswith('--- '):
372 gitpatches.append(gp)
372 gitpatches.append(gp)
373 gp = None
373 gp = None
374 continue
374 continue
375 if line.startswith('rename from '):
375 if line.startswith('rename from '):
376 gp.op = 'RENAME'
376 gp.op = 'RENAME'
377 gp.oldpath = line[12:]
377 gp.oldpath = line[12:]
378 elif line.startswith('rename to '):
378 elif line.startswith('rename to '):
379 gp.path = line[10:]
379 gp.path = line[10:]
380 elif line.startswith('copy from '):
380 elif line.startswith('copy from '):
381 gp.op = 'COPY'
381 gp.op = 'COPY'
382 gp.oldpath = line[10:]
382 gp.oldpath = line[10:]
383 elif line.startswith('copy to '):
383 elif line.startswith('copy to '):
384 gp.path = line[8:]
384 gp.path = line[8:]
385 elif line.startswith('deleted file'):
385 elif line.startswith('deleted file'):
386 gp.op = 'DELETE'
386 gp.op = 'DELETE'
387 elif line.startswith('new file mode '):
387 elif line.startswith('new file mode '):
388 gp.op = 'ADD'
388 gp.op = 'ADD'
389 gp.setmode(int(line[-6:], 8))
389 gp.setmode(int(line[-6:], 8))
390 elif line.startswith('new mode '):
390 elif line.startswith('new mode '):
391 gp.setmode(int(line[-6:], 8))
391 gp.setmode(int(line[-6:], 8))
392 elif line.startswith('GIT binary patch'):
392 elif line.startswith('GIT binary patch'):
393 gp.binary = True
393 gp.binary = True
394 if gp:
394 if gp:
395 gitpatches.append(gp)
395 gitpatches.append(gp)
396
396
397 return gitpatches
397 return gitpatches
398
398
399 class linereader(object):
399 class linereader(object):
400 # simple class to allow pushing lines back into the input stream
400 # simple class to allow pushing lines back into the input stream
401 def __init__(self, fp):
401 def __init__(self, fp):
402 self.fp = fp
402 self.fp = fp
403 self.buf = []
403 self.buf = []
404
404
405 def push(self, line):
405 def push(self, line):
406 if line is not None:
406 if line is not None:
407 self.buf.append(line)
407 self.buf.append(line)
408
408
409 def readline(self):
409 def readline(self):
410 if self.buf:
410 if self.buf:
411 l = self.buf[0]
411 l = self.buf[0]
412 del self.buf[0]
412 del self.buf[0]
413 return l
413 return l
414 return self.fp.readline()
414 return self.fp.readline()
415
415
416 def __iter__(self):
416 def __iter__(self):
417 return iter(self.readline, '')
417 return iter(self.readline, '')
418
418
419 class abstractbackend(object):
419 class abstractbackend(object):
420 def __init__(self, ui):
420 def __init__(self, ui):
421 self.ui = ui
421 self.ui = ui
422
422
423 def getfile(self, fname):
423 def getfile(self, fname):
424 """Return target file data and flags as a (data, (islink,
424 """Return target file data and flags as a (data, (islink,
425 isexec)) tuple. Data is None if file is missing/deleted.
425 isexec)) tuple. Data is None if file is missing/deleted.
426 """
426 """
427 raise NotImplementedError
427 raise NotImplementedError
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 """Write data to target file fname and set its mode. mode is a
430 """Write data to target file fname and set its mode. mode is a
431 (islink, isexec) tuple. If data is None, the file content should
431 (islink, isexec) tuple. If data is None, the file content should
432 be left unchanged. If the file is modified after being copied,
432 be left unchanged. If the file is modified after being copied,
433 copysource is set to the original file name.
433 copysource is set to the original file name.
434 """
434 """
435 raise NotImplementedError
435 raise NotImplementedError
436
436
437 def unlink(self, fname):
437 def unlink(self, fname):
438 """Unlink target file."""
438 """Unlink target file."""
439 raise NotImplementedError
439 raise NotImplementedError
440
440
441 def writerej(self, fname, failed, total, lines):
441 def writerej(self, fname, failed, total, lines):
442 """Write rejected lines for fname. total is the number of hunks
442 """Write rejected lines for fname. total is the number of hunks
443 which failed to apply and total the total number of hunks for this
443 which failed to apply and total the total number of hunks for this
444 files.
444 files.
445 """
445 """
446 pass
446 pass
447
447
448 def exists(self, fname):
448 def exists(self, fname):
449 raise NotImplementedError
449 raise NotImplementedError
450
450
451 def close(self):
451 def close(self):
452 raise NotImplementedError
452 raise NotImplementedError
453
453
454 class fsbackend(abstractbackend):
454 class fsbackend(abstractbackend):
455 def __init__(self, ui, basedir):
455 def __init__(self, ui, basedir):
456 super(fsbackend, self).__init__(ui)
456 super(fsbackend, self).__init__(ui)
457 self.opener = vfsmod.vfs(basedir)
457 self.opener = vfsmod.vfs(basedir)
458
458
459 def getfile(self, fname):
459 def getfile(self, fname):
460 if self.opener.islink(fname):
460 if self.opener.islink(fname):
461 return (self.opener.readlink(fname), (True, False))
461 return (self.opener.readlink(fname), (True, False))
462
462
463 isexec = False
463 isexec = False
464 try:
464 try:
465 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
465 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
466 except OSError as e:
466 except OSError as e:
467 if e.errno != errno.ENOENT:
467 if e.errno != errno.ENOENT:
468 raise
468 raise
469 try:
469 try:
470 return (self.opener.read(fname), (False, isexec))
470 return (self.opener.read(fname), (False, isexec))
471 except IOError as e:
471 except IOError as e:
472 if e.errno != errno.ENOENT:
472 if e.errno != errno.ENOENT:
473 raise
473 raise
474 return None, None
474 return None, None
475
475
476 def setfile(self, fname, data, mode, copysource):
476 def setfile(self, fname, data, mode, copysource):
477 islink, isexec = mode
477 islink, isexec = mode
478 if data is None:
478 if data is None:
479 self.opener.setflags(fname, islink, isexec)
479 self.opener.setflags(fname, islink, isexec)
480 return
480 return
481 if islink:
481 if islink:
482 self.opener.symlink(data, fname)
482 self.opener.symlink(data, fname)
483 else:
483 else:
484 self.opener.write(fname, data)
484 self.opener.write(fname, data)
485 if isexec:
485 if isexec:
486 self.opener.setflags(fname, False, True)
486 self.opener.setflags(fname, False, True)
487
487
488 def unlink(self, fname):
488 def unlink(self, fname):
489 self.opener.unlinkpath(fname, ignoremissing=True)
489 self.opener.unlinkpath(fname, ignoremissing=True)
490
490
491 def writerej(self, fname, failed, total, lines):
491 def writerej(self, fname, failed, total, lines):
492 fname = fname + ".rej"
492 fname = fname + ".rej"
493 self.ui.warn(
493 self.ui.warn(
494 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
494 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
495 (failed, total, fname))
495 (failed, total, fname))
496 fp = self.opener(fname, 'w')
496 fp = self.opener(fname, 'w')
497 fp.writelines(lines)
497 fp.writelines(lines)
498 fp.close()
498 fp.close()
499
499
500 def exists(self, fname):
500 def exists(self, fname):
501 return self.opener.lexists(fname)
501 return self.opener.lexists(fname)
502
502
503 class workingbackend(fsbackend):
503 class workingbackend(fsbackend):
504 def __init__(self, ui, repo, similarity):
504 def __init__(self, ui, repo, similarity):
505 super(workingbackend, self).__init__(ui, repo.root)
505 super(workingbackend, self).__init__(ui, repo.root)
506 self.repo = repo
506 self.repo = repo
507 self.similarity = similarity
507 self.similarity = similarity
508 self.removed = set()
508 self.removed = set()
509 self.changed = set()
509 self.changed = set()
510 self.copied = []
510 self.copied = []
511
511
512 def _checkknown(self, fname):
512 def _checkknown(self, fname):
513 if self.repo.dirstate[fname] == '?' and self.exists(fname):
513 if self.repo.dirstate[fname] == '?' and self.exists(fname):
514 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
514 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
515
515
516 def setfile(self, fname, data, mode, copysource):
516 def setfile(self, fname, data, mode, copysource):
517 self._checkknown(fname)
517 self._checkknown(fname)
518 super(workingbackend, self).setfile(fname, data, mode, copysource)
518 super(workingbackend, self).setfile(fname, data, mode, copysource)
519 if copysource is not None:
519 if copysource is not None:
520 self.copied.append((copysource, fname))
520 self.copied.append((copysource, fname))
521 self.changed.add(fname)
521 self.changed.add(fname)
522
522
523 def unlink(self, fname):
523 def unlink(self, fname):
524 self._checkknown(fname)
524 self._checkknown(fname)
525 super(workingbackend, self).unlink(fname)
525 super(workingbackend, self).unlink(fname)
526 self.removed.add(fname)
526 self.removed.add(fname)
527 self.changed.add(fname)
527 self.changed.add(fname)
528
528
529 def close(self):
529 def close(self):
530 wctx = self.repo[None]
530 wctx = self.repo[None]
531 changed = set(self.changed)
531 changed = set(self.changed)
532 for src, dst in self.copied:
532 for src, dst in self.copied:
533 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
533 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
534 if self.removed:
534 if self.removed:
535 wctx.forget(sorted(self.removed))
535 wctx.forget(sorted(self.removed))
536 for f in self.removed:
536 for f in self.removed:
537 if f not in self.repo.dirstate:
537 if f not in self.repo.dirstate:
538 # File was deleted and no longer belongs to the
538 # File was deleted and no longer belongs to the
539 # dirstate, it was probably marked added then
539 # dirstate, it was probably marked added then
540 # deleted, and should not be considered by
540 # deleted, and should not be considered by
541 # marktouched().
541 # marktouched().
542 changed.discard(f)
542 changed.discard(f)
543 if changed:
543 if changed:
544 scmutil.marktouched(self.repo, changed, self.similarity)
544 scmutil.marktouched(self.repo, changed, self.similarity)
545 return sorted(self.changed)
545 return sorted(self.changed)
546
546
547 class filestore(object):
547 class filestore(object):
548 def __init__(self, maxsize=None):
548 def __init__(self, maxsize=None):
549 self.opener = None
549 self.opener = None
550 self.files = {}
550 self.files = {}
551 self.created = 0
551 self.created = 0
552 self.maxsize = maxsize
552 self.maxsize = maxsize
553 if self.maxsize is None:
553 if self.maxsize is None:
554 self.maxsize = 4*(2**20)
554 self.maxsize = 4*(2**20)
555 self.size = 0
555 self.size = 0
556 self.data = {}
556 self.data = {}
557
557
558 def setfile(self, fname, data, mode, copied=None):
558 def setfile(self, fname, data, mode, copied=None):
559 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
559 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
560 self.data[fname] = (data, mode, copied)
560 self.data[fname] = (data, mode, copied)
561 self.size += len(data)
561 self.size += len(data)
562 else:
562 else:
563 if self.opener is None:
563 if self.opener is None:
564 root = tempfile.mkdtemp(prefix='hg-patch-')
564 root = tempfile.mkdtemp(prefix='hg-patch-')
565 self.opener = vfsmod.vfs(root)
565 self.opener = vfsmod.vfs(root)
566 # Avoid filename issues with these simple names
566 # Avoid filename issues with these simple names
567 fn = str(self.created)
567 fn = str(self.created)
568 self.opener.write(fn, data)
568 self.opener.write(fn, data)
569 self.created += 1
569 self.created += 1
570 self.files[fname] = (fn, mode, copied)
570 self.files[fname] = (fn, mode, copied)
571
571
572 def getfile(self, fname):
572 def getfile(self, fname):
573 if fname in self.data:
573 if fname in self.data:
574 return self.data[fname]
574 return self.data[fname]
575 if not self.opener or fname not in self.files:
575 if not self.opener or fname not in self.files:
576 return None, None, None
576 return None, None, None
577 fn, mode, copied = self.files[fname]
577 fn, mode, copied = self.files[fname]
578 return self.opener.read(fn), mode, copied
578 return self.opener.read(fn), mode, copied
579
579
580 def close(self):
580 def close(self):
581 if self.opener:
581 if self.opener:
582 shutil.rmtree(self.opener.base)
582 shutil.rmtree(self.opener.base)
583
583
584 class repobackend(abstractbackend):
584 class repobackend(abstractbackend):
585 def __init__(self, ui, repo, ctx, store):
585 def __init__(self, ui, repo, ctx, store):
586 super(repobackend, self).__init__(ui)
586 super(repobackend, self).__init__(ui)
587 self.repo = repo
587 self.repo = repo
588 self.ctx = ctx
588 self.ctx = ctx
589 self.store = store
589 self.store = store
590 self.changed = set()
590 self.changed = set()
591 self.removed = set()
591 self.removed = set()
592 self.copied = {}
592 self.copied = {}
593
593
594 def _checkknown(self, fname):
594 def _checkknown(self, fname):
595 if fname not in self.ctx:
595 if fname not in self.ctx:
596 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
596 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
597
597
598 def getfile(self, fname):
598 def getfile(self, fname):
599 try:
599 try:
600 fctx = self.ctx[fname]
600 fctx = self.ctx[fname]
601 except error.LookupError:
601 except error.LookupError:
602 return None, None
602 return None, None
603 flags = fctx.flags()
603 flags = fctx.flags()
604 return fctx.data(), ('l' in flags, 'x' in flags)
604 return fctx.data(), ('l' in flags, 'x' in flags)
605
605
606 def setfile(self, fname, data, mode, copysource):
606 def setfile(self, fname, data, mode, copysource):
607 if copysource:
607 if copysource:
608 self._checkknown(copysource)
608 self._checkknown(copysource)
609 if data is None:
609 if data is None:
610 data = self.ctx[fname].data()
610 data = self.ctx[fname].data()
611 self.store.setfile(fname, data, mode, copysource)
611 self.store.setfile(fname, data, mode, copysource)
612 self.changed.add(fname)
612 self.changed.add(fname)
613 if copysource:
613 if copysource:
614 self.copied[fname] = copysource
614 self.copied[fname] = copysource
615
615
616 def unlink(self, fname):
616 def unlink(self, fname):
617 self._checkknown(fname)
617 self._checkknown(fname)
618 self.removed.add(fname)
618 self.removed.add(fname)
619
619
620 def exists(self, fname):
620 def exists(self, fname):
621 return fname in self.ctx
621 return fname in self.ctx
622
622
623 def close(self):
623 def close(self):
624 return self.changed | self.removed
624 return self.changed | self.removed
625
625
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
630
630
631 class patchfile(object):
631 class patchfile(object):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
633 self.fname = gp.path
633 self.fname = gp.path
634 self.eolmode = eolmode
634 self.eolmode = eolmode
635 self.eol = None
635 self.eol = None
636 self.backend = backend
636 self.backend = backend
637 self.ui = ui
637 self.ui = ui
638 self.lines = []
638 self.lines = []
639 self.exists = False
639 self.exists = False
640 self.missing = True
640 self.missing = True
641 self.mode = gp.mode
641 self.mode = gp.mode
642 self.copysource = gp.oldpath
642 self.copysource = gp.oldpath
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
644 self.remove = gp.op == 'DELETE'
644 self.remove = gp.op == 'DELETE'
645 if self.copysource is None:
645 if self.copysource is None:
646 data, mode = backend.getfile(self.fname)
646 data, mode = backend.getfile(self.fname)
647 else:
647 else:
648 data, mode = store.getfile(self.copysource)[:2]
648 data, mode = store.getfile(self.copysource)[:2]
649 if data is not None:
649 if data is not None:
650 self.exists = self.copysource is None or backend.exists(self.fname)
650 self.exists = self.copysource is None or backend.exists(self.fname)
651 self.missing = False
651 self.missing = False
652 if data:
652 if data:
653 self.lines = mdiff.splitnewlines(data)
653 self.lines = mdiff.splitnewlines(data)
654 if self.mode is None:
654 if self.mode is None:
655 self.mode = mode
655 self.mode = mode
656 if self.lines:
656 if self.lines:
657 # Normalize line endings
657 # Normalize line endings
658 if self.lines[0].endswith('\r\n'):
658 if self.lines[0].endswith('\r\n'):
659 self.eol = '\r\n'
659 self.eol = '\r\n'
660 elif self.lines[0].endswith('\n'):
660 elif self.lines[0].endswith('\n'):
661 self.eol = '\n'
661 self.eol = '\n'
662 if eolmode != 'strict':
662 if eolmode != 'strict':
663 nlines = []
663 nlines = []
664 for l in self.lines:
664 for l in self.lines:
665 if l.endswith('\r\n'):
665 if l.endswith('\r\n'):
666 l = l[:-2] + '\n'
666 l = l[:-2] + '\n'
667 nlines.append(l)
667 nlines.append(l)
668 self.lines = nlines
668 self.lines = nlines
669 else:
669 else:
670 if self.create:
670 if self.create:
671 self.missing = False
671 self.missing = False
672 if self.mode is None:
672 if self.mode is None:
673 self.mode = (False, False)
673 self.mode = (False, False)
674 if self.missing:
674 if self.missing:
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
677 "current directory)\n"))
677 "current directory)\n"))
678
678
679 self.hash = {}
679 self.hash = {}
680 self.dirty = 0
680 self.dirty = 0
681 self.offset = 0
681 self.offset = 0
682 self.skew = 0
682 self.skew = 0
683 self.rej = []
683 self.rej = []
684 self.fileprinted = False
684 self.fileprinted = False
685 self.printfile(False)
685 self.printfile(False)
686 self.hunks = 0
686 self.hunks = 0
687
687
688 def writelines(self, fname, lines, mode):
688 def writelines(self, fname, lines, mode):
689 if self.eolmode == 'auto':
689 if self.eolmode == 'auto':
690 eol = self.eol
690 eol = self.eol
691 elif self.eolmode == 'crlf':
691 elif self.eolmode == 'crlf':
692 eol = '\r\n'
692 eol = '\r\n'
693 else:
693 else:
694 eol = '\n'
694 eol = '\n'
695
695
696 if self.eolmode != 'strict' and eol and eol != '\n':
696 if self.eolmode != 'strict' and eol and eol != '\n':
697 rawlines = []
697 rawlines = []
698 for l in lines:
698 for l in lines:
699 if l and l[-1] == '\n':
699 if l and l[-1] == '\n':
700 l = l[:-1] + eol
700 l = l[:-1] + eol
701 rawlines.append(l)
701 rawlines.append(l)
702 lines = rawlines
702 lines = rawlines
703
703
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
705
705
706 def printfile(self, warn):
706 def printfile(self, warn):
707 if self.fileprinted:
707 if self.fileprinted:
708 return
708 return
709 if warn or self.ui.verbose:
709 if warn or self.ui.verbose:
710 self.fileprinted = True
710 self.fileprinted = True
711 s = _("patching file %s\n") % self.fname
711 s = _("patching file %s\n") % self.fname
712 if warn:
712 if warn:
713 self.ui.warn(s)
713 self.ui.warn(s)
714 else:
714 else:
715 self.ui.note(s)
715 self.ui.note(s)
716
716
717
717
718 def findlines(self, l, linenum):
718 def findlines(self, l, linenum):
719 # looks through the hash and finds candidate lines. The
719 # looks through the hash and finds candidate lines. The
720 # result is a list of line numbers sorted based on distance
720 # result is a list of line numbers sorted based on distance
721 # from linenum
721 # from linenum
722
722
723 cand = self.hash.get(l, [])
723 cand = self.hash.get(l, [])
724 if len(cand) > 1:
724 if len(cand) > 1:
725 # resort our list of potentials forward then back.
725 # resort our list of potentials forward then back.
726 cand.sort(key=lambda x: abs(x - linenum))
726 cand.sort(key=lambda x: abs(x - linenum))
727 return cand
727 return cand
728
728
729 def write_rej(self):
729 def write_rej(self):
730 # our rejects are a little different from patch(1). This always
730 # our rejects are a little different from patch(1). This always
731 # creates rejects in the same form as the original patch. A file
731 # creates rejects in the same form as the original patch. A file
732 # header is inserted so that you can run the reject through patch again
732 # header is inserted so that you can run the reject through patch again
733 # without having to type the filename.
733 # without having to type the filename.
734 if not self.rej:
734 if not self.rej:
735 return
735 return
736 base = os.path.basename(self.fname)
736 base = os.path.basename(self.fname)
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
738 for x in self.rej:
738 for x in self.rej:
739 for l in x.hunk:
739 for l in x.hunk:
740 lines.append(l)
740 lines.append(l)
741 if l[-1:] != '\n':
741 if l[-1:] != '\n':
742 lines.append("\n\ No newline at end of file\n")
742 lines.append("\n\ No newline at end of file\n")
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
744
744
745 def apply(self, h):
745 def apply(self, h):
746 if not h.complete():
746 if not h.complete():
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
749 h.lenb))
749 h.lenb))
750
750
751 self.hunks += 1
751 self.hunks += 1
752
752
753 if self.missing:
753 if self.missing:
754 self.rej.append(h)
754 self.rej.append(h)
755 return -1
755 return -1
756
756
757 if self.exists and self.create:
757 if self.exists and self.create:
758 if self.copysource:
758 if self.copysource:
759 self.ui.warn(_("cannot create %s: destination already "
759 self.ui.warn(_("cannot create %s: destination already "
760 "exists\n") % self.fname)
760 "exists\n") % self.fname)
761 else:
761 else:
762 self.ui.warn(_("file %s already exists\n") % self.fname)
762 self.ui.warn(_("file %s already exists\n") % self.fname)
763 self.rej.append(h)
763 self.rej.append(h)
764 return -1
764 return -1
765
765
766 if isinstance(h, binhunk):
766 if isinstance(h, binhunk):
767 if self.remove:
767 if self.remove:
768 self.backend.unlink(self.fname)
768 self.backend.unlink(self.fname)
769 else:
769 else:
770 l = h.new(self.lines)
770 l = h.new(self.lines)
771 self.lines[:] = l
771 self.lines[:] = l
772 self.offset += len(l)
772 self.offset += len(l)
773 self.dirty = True
773 self.dirty = True
774 return 0
774 return 0
775
775
776 horig = h
776 horig = h
777 if (self.eolmode in ('crlf', 'lf')
777 if (self.eolmode in ('crlf', 'lf')
778 or self.eolmode == 'auto' and self.eol):
778 or self.eolmode == 'auto' and self.eol):
779 # If new eols are going to be normalized, then normalize
779 # If new eols are going to be normalized, then normalize
780 # hunk data before patching. Otherwise, preserve input
780 # hunk data before patching. Otherwise, preserve input
781 # line-endings.
781 # line-endings.
782 h = h.getnormalized()
782 h = h.getnormalized()
783
783
784 # fast case first, no offsets, no fuzz
784 # fast case first, no offsets, no fuzz
785 old, oldstart, new, newstart = h.fuzzit(0, False)
785 old, oldstart, new, newstart = h.fuzzit(0, False)
786 oldstart += self.offset
786 oldstart += self.offset
787 orig_start = oldstart
787 orig_start = oldstart
788 # if there's skew we want to emit the "(offset %d lines)" even
788 # if there's skew we want to emit the "(offset %d lines)" even
789 # when the hunk cleanly applies at start + skew, so skip the
789 # when the hunk cleanly applies at start + skew, so skip the
790 # fast case code
790 # fast case code
791 if (self.skew == 0 and
791 if (self.skew == 0 and
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
793 if self.remove:
793 if self.remove:
794 self.backend.unlink(self.fname)
794 self.backend.unlink(self.fname)
795 else:
795 else:
796 self.lines[oldstart:oldstart + len(old)] = new
796 self.lines[oldstart:oldstart + len(old)] = new
797 self.offset += len(new) - len(old)
797 self.offset += len(new) - len(old)
798 self.dirty = True
798 self.dirty = True
799 return 0
799 return 0
800
800
801 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
801 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
802 self.hash = {}
802 self.hash = {}
803 for x, s in enumerate(self.lines):
803 for x, s in enumerate(self.lines):
804 self.hash.setdefault(s, []).append(x)
804 self.hash.setdefault(s, []).append(x)
805
805
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
807 for toponly in [True, False]:
807 for toponly in [True, False]:
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
809 oldstart = oldstart + self.offset + self.skew
809 oldstart = oldstart + self.offset + self.skew
810 oldstart = min(oldstart, len(self.lines))
810 oldstart = min(oldstart, len(self.lines))
811 if old:
811 if old:
812 cand = self.findlines(old[0][1:], oldstart)
812 cand = self.findlines(old[0][1:], oldstart)
813 else:
813 else:
814 # Only adding lines with no or fuzzed context, just
814 # Only adding lines with no or fuzzed context, just
815 # take the skew in account
815 # take the skew in account
816 cand = [oldstart]
816 cand = [oldstart]
817
817
818 for l in cand:
818 for l in cand:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
820 self.lines[l : l + len(old)] = new
820 self.lines[l : l + len(old)] = new
821 self.offset += len(new) - len(old)
821 self.offset += len(new) - len(old)
822 self.skew = l - orig_start
822 self.skew = l - orig_start
823 self.dirty = True
823 self.dirty = True
824 offset = l - orig_start - fuzzlen
824 offset = l - orig_start - fuzzlen
825 if fuzzlen:
825 if fuzzlen:
826 msg = _("Hunk #%d succeeded at %d "
826 msg = _("Hunk #%d succeeded at %d "
827 "with fuzz %d "
827 "with fuzz %d "
828 "(offset %d lines).\n")
828 "(offset %d lines).\n")
829 self.printfile(True)
829 self.printfile(True)
830 self.ui.warn(msg %
830 self.ui.warn(msg %
831 (h.number, l + 1, fuzzlen, offset))
831 (h.number, l + 1, fuzzlen, offset))
832 else:
832 else:
833 msg = _("Hunk #%d succeeded at %d "
833 msg = _("Hunk #%d succeeded at %d "
834 "(offset %d lines).\n")
834 "(offset %d lines).\n")
835 self.ui.note(msg % (h.number, l + 1, offset))
835 self.ui.note(msg % (h.number, l + 1, offset))
836 return fuzzlen
836 return fuzzlen
837 self.printfile(True)
837 self.printfile(True)
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
839 self.rej.append(horig)
839 self.rej.append(horig)
840 return -1
840 return -1
841
841
842 def close(self):
842 def close(self):
843 if self.dirty:
843 if self.dirty:
844 self.writelines(self.fname, self.lines, self.mode)
844 self.writelines(self.fname, self.lines, self.mode)
845 self.write_rej()
845 self.write_rej()
846 return len(self.rej)
846 return len(self.rej)
847
847
848 class header(object):
848 class header(object):
849 """patch header
849 """patch header
850 """
850 """
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
853 allhunks_re = re.compile('(?:index|deleted file) ')
853 allhunks_re = re.compile('(?:index|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
856 newfile_re = re.compile('(?:new file)')
856 newfile_re = re.compile('(?:new file)')
857
857
858 def __init__(self, header):
858 def __init__(self, header):
859 self.header = header
859 self.header = header
860 self.hunks = []
860 self.hunks = []
861
861
862 def binary(self):
862 def binary(self):
863 return any(h.startswith('index ') for h in self.header)
863 return any(h.startswith('index ') for h in self.header)
864
864
865 def pretty(self, fp):
865 def pretty(self, fp):
866 for h in self.header:
866 for h in self.header:
867 if h.startswith('index '):
867 if h.startswith('index '):
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
869 break
869 break
870 if self.pretty_re.match(h):
870 if self.pretty_re.match(h):
871 fp.write(h)
871 fp.write(h)
872 if self.binary():
872 if self.binary():
873 fp.write(_('this is a binary file\n'))
873 fp.write(_('this is a binary file\n'))
874 break
874 break
875 if h.startswith('---'):
875 if h.startswith('---'):
876 fp.write(_('%d hunks, %d lines changed\n') %
876 fp.write(_('%d hunks, %d lines changed\n') %
877 (len(self.hunks),
877 (len(self.hunks),
878 sum([max(h.added, h.removed) for h in self.hunks])))
878 sum([max(h.added, h.removed) for h in self.hunks])))
879 break
879 break
880 fp.write(h)
880 fp.write(h)
881
881
882 def write(self, fp):
882 def write(self, fp):
883 fp.write(''.join(self.header))
883 fp.write(''.join(self.header))
884
884
885 def allhunks(self):
885 def allhunks(self):
886 return any(self.allhunks_re.match(h) for h in self.header)
886 return any(self.allhunks_re.match(h) for h in self.header)
887
887
888 def files(self):
888 def files(self):
889 match = self.diffgit_re.match(self.header[0])
889 match = self.diffgit_re.match(self.header[0])
890 if match:
890 if match:
891 fromfile, tofile = match.groups()
891 fromfile, tofile = match.groups()
892 if fromfile == tofile:
892 if fromfile == tofile:
893 return [fromfile]
893 return [fromfile]
894 return [fromfile, tofile]
894 return [fromfile, tofile]
895 else:
895 else:
896 return self.diff_re.match(self.header[0]).groups()
896 return self.diff_re.match(self.header[0]).groups()
897
897
898 def filename(self):
898 def filename(self):
899 return self.files()[-1]
899 return self.files()[-1]
900
900
901 def __repr__(self):
901 def __repr__(self):
902 return '<header %s>' % (' '.join(map(repr, self.files())))
902 return '<header %s>' % (' '.join(map(repr, self.files())))
903
903
904 def isnewfile(self):
904 def isnewfile(self):
905 return any(self.newfile_re.match(h) for h in self.header)
905 return any(self.newfile_re.match(h) for h in self.header)
906
906
907 def special(self):
907 def special(self):
908 # Special files are shown only at the header level and not at the hunk
908 # Special files are shown only at the header level and not at the hunk
909 # level for example a file that has been deleted is a special file.
909 # level for example a file that has been deleted is a special file.
910 # The user cannot change the content of the operation, in the case of
910 # The user cannot change the content of the operation, in the case of
911 # the deleted file he has to take the deletion or not take it, he
911 # the deleted file he has to take the deletion or not take it, he
912 # cannot take some of it.
912 # cannot take some of it.
913 # Newly added files are special if they are empty, they are not special
913 # Newly added files are special if they are empty, they are not special
914 # if they have some content as we want to be able to change it
914 # if they have some content as we want to be able to change it
915 nocontent = len(self.header) == 2
915 nocontent = len(self.header) == 2
916 emptynewfile = self.isnewfile() and nocontent
916 emptynewfile = self.isnewfile() and nocontent
917 return emptynewfile or \
917 return emptynewfile or \
918 any(self.special_re.match(h) for h in self.header)
918 any(self.special_re.match(h) for h in self.header)
919
919
920 class recordhunk(object):
920 class recordhunk(object):
921 """patch hunk
921 """patch hunk
922
922
923 XXX shouldn't we merge this with the other hunk class?
923 XXX shouldn't we merge this with the other hunk class?
924 """
924 """
925
925
926 def __init__(self, header, fromline, toline, proc, before, hunk, after,
926 def __init__(self, header, fromline, toline, proc, before, hunk, after,
927 maxcontext=None):
927 maxcontext=None):
928 def trimcontext(lines, reverse=False):
928 def trimcontext(lines, reverse=False):
929 if maxcontext is not None:
929 if maxcontext is not None:
930 delta = len(lines) - maxcontext
930 delta = len(lines) - maxcontext
931 if delta > 0:
931 if delta > 0:
932 if reverse:
932 if reverse:
933 return delta, lines[delta:]
933 return delta, lines[delta:]
934 else:
934 else:
935 return delta, lines[:maxcontext]
935 return delta, lines[:maxcontext]
936 return 0, lines
936 return 0, lines
937
937
938 self.header = header
938 self.header = header
939 trimedbefore, self.before = trimcontext(before, True)
939 trimedbefore, self.before = trimcontext(before, True)
940 self.fromline = fromline + trimedbefore
940 self.fromline = fromline + trimedbefore
941 self.toline = toline + trimedbefore
941 self.toline = toline + trimedbefore
942 _trimedafter, self.after = trimcontext(after, False)
942 _trimedafter, self.after = trimcontext(after, False)
943 self.proc = proc
943 self.proc = proc
944 self.hunk = hunk
944 self.hunk = hunk
945 self.added, self.removed = self.countchanges(self.hunk)
945 self.added, self.removed = self.countchanges(self.hunk)
946
946
947 def __eq__(self, v):
947 def __eq__(self, v):
948 if not isinstance(v, recordhunk):
948 if not isinstance(v, recordhunk):
949 return False
949 return False
950
950
951 return ((v.hunk == self.hunk) and
951 return ((v.hunk == self.hunk) and
952 (v.proc == self.proc) and
952 (v.proc == self.proc) and
953 (self.fromline == v.fromline) and
953 (self.fromline == v.fromline) and
954 (self.header.files() == v.header.files()))
954 (self.header.files() == v.header.files()))
955
955
956 def __hash__(self):
956 def __hash__(self):
957 return hash((tuple(self.hunk),
957 return hash((tuple(self.hunk),
958 tuple(self.header.files()),
958 tuple(self.header.files()),
959 self.fromline,
959 self.fromline,
960 self.proc))
960 self.proc))
961
961
962 def countchanges(self, hunk):
962 def countchanges(self, hunk):
963 """hunk -> (n+,n-)"""
963 """hunk -> (n+,n-)"""
964 add = len([h for h in hunk if h[0] == '+'])
964 add = len([h for h in hunk if h[0] == '+'])
965 rem = len([h for h in hunk if h[0] == '-'])
965 rem = len([h for h in hunk if h[0] == '-'])
966 return add, rem
966 return add, rem
967
967
968 def reversehunk(self):
968 def reversehunk(self):
969 """return another recordhunk which is the reverse of the hunk
969 """return another recordhunk which is the reverse of the hunk
970
970
971 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
971 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
972 that, swap fromline/toline and +/- signs while keep other things
972 that, swap fromline/toline and +/- signs while keep other things
973 unchanged.
973 unchanged.
974 """
974 """
975 - m = {'+': '-', '-': '+'}
975 + m = {'+': '-', '-': '+', '\\': '\\'}
976 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
976 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
977 return recordhunk(self.header, self.toline, self.fromline, self.proc,
977 return recordhunk(self.header, self.toline, self.fromline, self.proc,
978 self.before, hunk, self.after)
978 self.before, hunk, self.after)
979
979
980 def write(self, fp):
980 def write(self, fp):
981 delta = len(self.before) + len(self.after)
981 delta = len(self.before) + len(self.after)
982 if self.after and self.after[-1] == '\\ No newline at end of file\n':
982 if self.after and self.after[-1] == '\\ No newline at end of file\n':
983 delta -= 1
983 delta -= 1
984 fromlen = delta + self.removed
984 fromlen = delta + self.removed
985 tolen = delta + self.added
985 tolen = delta + self.added
986 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
986 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
987 (self.fromline, fromlen, self.toline, tolen,
987 (self.fromline, fromlen, self.toline, tolen,
988 self.proc and (' ' + self.proc)))
988 self.proc and (' ' + self.proc)))
989 fp.write(''.join(self.before + self.hunk + self.after))
989 fp.write(''.join(self.before + self.hunk + self.after))
990
990
991 pretty = write
991 pretty = write
992
992
993 def filename(self):
993 def filename(self):
994 return self.header.filename()
994 return self.header.filename()
995
995
996 def __repr__(self):
996 def __repr__(self):
997 return '<hunk %r@%d>' % (self.filename(), self.fromline)
997 return '<hunk %r@%d>' % (self.filename(), self.fromline)
998
998
999 def filterpatch(ui, headers, operation=None):
999 def filterpatch(ui, headers, operation=None):
1000 """Interactively filter patch chunks into applied-only chunks"""
1000 """Interactively filter patch chunks into applied-only chunks"""
1001 if operation is None:
1001 if operation is None:
1002 operation = 'record'
1002 operation = 'record'
1003 messages = {
1003 messages = {
1004 'multiple': {
1004 'multiple': {
1005 'discard': _("discard change %d/%d to '%s'?"),
1005 'discard': _("discard change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1007 'revert': _("revert change %d/%d to '%s'?"),
1007 'revert': _("revert change %d/%d to '%s'?"),
1008 }[operation],
1008 }[operation],
1009 'single': {
1009 'single': {
1010 'discard': _("discard this change to '%s'?"),
1010 'discard': _("discard this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1012 'revert': _("revert this change to '%s'?"),
1012 'revert': _("revert this change to '%s'?"),
1013 }[operation],
1013 }[operation],
1014 'help': {
1014 'help': {
1015 'discard': _('[Ynesfdaq?]'
1015 'discard': _('[Ynesfdaq?]'
1016 '$$ &Yes, discard this change'
1016 '$$ &Yes, discard this change'
1017 '$$ &No, skip this change'
1017 '$$ &No, skip this change'
1018 '$$ &Edit this change manually'
1018 '$$ &Edit this change manually'
1019 '$$ &Skip remaining changes to this file'
1019 '$$ &Skip remaining changes to this file'
1020 '$$ Discard remaining changes to this &file'
1020 '$$ Discard remaining changes to this &file'
1021 '$$ &Done, skip remaining changes and files'
1021 '$$ &Done, skip remaining changes and files'
1022 '$$ Discard &all changes to all remaining files'
1022 '$$ Discard &all changes to all remaining files'
1023 '$$ &Quit, discarding no changes'
1023 '$$ &Quit, discarding no changes'
1024 '$$ &? (display help)'),
1024 '$$ &? (display help)'),
1025 'record': _('[Ynesfdaq?]'
1025 'record': _('[Ynesfdaq?]'
1026 '$$ &Yes, record this change'
1026 '$$ &Yes, record this change'
1027 '$$ &No, skip this change'
1027 '$$ &No, skip this change'
1028 '$$ &Edit this change manually'
1028 '$$ &Edit this change manually'
1029 '$$ &Skip remaining changes to this file'
1029 '$$ &Skip remaining changes to this file'
1030 '$$ Record remaining changes to this &file'
1030 '$$ Record remaining changes to this &file'
1031 '$$ &Done, skip remaining changes and files'
1031 '$$ &Done, skip remaining changes and files'
1032 '$$ Record &all changes to all remaining files'
1032 '$$ Record &all changes to all remaining files'
1033 '$$ &Quit, recording no changes'
1033 '$$ &Quit, recording no changes'
1034 '$$ &? (display help)'),
1034 '$$ &? (display help)'),
1035 'revert': _('[Ynesfdaq?]'
1035 'revert': _('[Ynesfdaq?]'
1036 '$$ &Yes, revert this change'
1036 '$$ &Yes, revert this change'
1037 '$$ &No, skip this change'
1037 '$$ &No, skip this change'
1038 '$$ &Edit this change manually'
1038 '$$ &Edit this change manually'
1039 '$$ &Skip remaining changes to this file'
1039 '$$ &Skip remaining changes to this file'
1040 '$$ Revert remaining changes to this &file'
1040 '$$ Revert remaining changes to this &file'
1041 '$$ &Done, skip remaining changes and files'
1041 '$$ &Done, skip remaining changes and files'
1042 '$$ Revert &all changes to all remaining files'
1042 '$$ Revert &all changes to all remaining files'
1043 '$$ &Quit, reverting no changes'
1043 '$$ &Quit, reverting no changes'
1044 '$$ &? (display help)')
1044 '$$ &? (display help)')
1045 }[operation]
1045 }[operation]
1046 }
1046 }
1047
1047
1048 def prompt(skipfile, skipall, query, chunk):
1048 def prompt(skipfile, skipall, query, chunk):
1049 """prompt query, and process base inputs
1049 """prompt query, and process base inputs
1050
1050
1051 - y/n for the rest of file
1051 - y/n for the rest of file
1052 - y/n for the rest
1052 - y/n for the rest
1053 - ? (help)
1053 - ? (help)
1054 - q (quit)
1054 - q (quit)
1055
1055
1056 Return True/False and possibly updated skipfile and skipall.
1056 Return True/False and possibly updated skipfile and skipall.
1057 """
1057 """
1058 newpatches = None
1058 newpatches = None
1059 if skipall is not None:
1059 if skipall is not None:
1060 return skipall, skipfile, skipall, newpatches
1060 return skipall, skipfile, skipall, newpatches
1061 if skipfile is not None:
1061 if skipfile is not None:
1062 return skipfile, skipfile, skipall, newpatches
1062 return skipfile, skipfile, skipall, newpatches
1063 while True:
1063 while True:
1064 resps = messages['help']
1064 resps = messages['help']
1065 r = ui.promptchoice("%s %s" % (query, resps))
1065 r = ui.promptchoice("%s %s" % (query, resps))
1066 ui.write("\n")
1066 ui.write("\n")
1067 if r == 8: # ?
1067 if r == 8: # ?
1068 for c, t in ui.extractchoices(resps)[1]:
1068 for c, t in ui.extractchoices(resps)[1]:
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1070 continue
1070 continue
1071 elif r == 0: # yes
1071 elif r == 0: # yes
1072 ret = True
1072 ret = True
1073 elif r == 1: # no
1073 elif r == 1: # no
1074 ret = False
1074 ret = False
1075 elif r == 2: # Edit patch
1075 elif r == 2: # Edit patch
1076 if chunk is None:
1076 if chunk is None:
1077 ui.write(_('cannot edit patch for whole file'))
1077 ui.write(_('cannot edit patch for whole file'))
1078 ui.write("\n")
1078 ui.write("\n")
1079 continue
1079 continue
1080 if chunk.header.binary():
1080 if chunk.header.binary():
1081 ui.write(_('cannot edit patch for binary file'))
1081 ui.write(_('cannot edit patch for binary file'))
1082 ui.write("\n")
1082 ui.write("\n")
1083 continue
1083 continue
1084 # Patch comment based on the Git one (based on comment at end of
1084 # Patch comment based on the Git one (based on comment at end of
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1086 phelp = '---' + _("""
1086 phelp = '---' + _("""
1087 To remove '-' lines, make them ' ' lines (context).
1087 To remove '-' lines, make them ' ' lines (context).
1088 To remove '+' lines, delete them.
1088 To remove '+' lines, delete them.
1089 Lines starting with # will be removed from the patch.
1089 Lines starting with # will be removed from the patch.
1090
1090
1091 If the patch applies cleanly, the edited hunk will immediately be
1091 If the patch applies cleanly, the edited hunk will immediately be
1092 added to the record list. If it does not apply cleanly, a rejects
1092 added to the record list. If it does not apply cleanly, a rejects
1093 file will be generated: you can use that when you try again. If
1093 file will be generated: you can use that when you try again. If
1094 all lines of the hunk are removed, then the edit is aborted and
1094 all lines of the hunk are removed, then the edit is aborted and
1095 the hunk is left unchanged.
1095 the hunk is left unchanged.
1096 """)
1096 """)
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1098 suffix=".diff", text=True)
1098 suffix=".diff", text=True)
1099 ncpatchfp = None
1099 ncpatchfp = None
1100 try:
1100 try:
1101 # Write the initial patch
1101 # Write the initial patch
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1103 chunk.header.write(f)
1103 chunk.header.write(f)
1104 chunk.write(f)
1104 chunk.write(f)
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1106 f.close()
1106 f.close()
1107 # Start the editor and wait for it to complete
1107 # Start the editor and wait for it to complete
1108 editor = ui.geteditor()
1108 editor = ui.geteditor()
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1110 environ={'HGUSER': ui.username()},
1110 environ={'HGUSER': ui.username()},
1111 blockedtag='filterpatch')
1111 blockedtag='filterpatch')
1112 if ret != 0:
1112 if ret != 0:
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1114 continue
1114 continue
1115 # Remove comment lines
1115 # Remove comment lines
1116 patchfp = open(patchfn)
1116 patchfp = open(patchfn)
1117 ncpatchfp = stringio()
1117 ncpatchfp = stringio()
1118 for line in util.iterfile(patchfp):
1118 for line in util.iterfile(patchfp):
1119 if not line.startswith('#'):
1119 if not line.startswith('#'):
1120 ncpatchfp.write(line)
1120 ncpatchfp.write(line)
1121 patchfp.close()
1121 patchfp.close()
1122 ncpatchfp.seek(0)
1122 ncpatchfp.seek(0)
1123 newpatches = parsepatch(ncpatchfp)
1123 newpatches = parsepatch(ncpatchfp)
1124 finally:
1124 finally:
1125 os.unlink(patchfn)
1125 os.unlink(patchfn)
1126 del ncpatchfp
1126 del ncpatchfp
1127 # Signal that the chunk shouldn't be applied as-is, but
1127 # Signal that the chunk shouldn't be applied as-is, but
1128 # provide the new patch to be used instead.
1128 # provide the new patch to be used instead.
1129 ret = False
1129 ret = False
1130 elif r == 3: # Skip
1130 elif r == 3: # Skip
1131 ret = skipfile = False
1131 ret = skipfile = False
1132 elif r == 4: # file (Record remaining)
1132 elif r == 4: # file (Record remaining)
1133 ret = skipfile = True
1133 ret = skipfile = True
1134 elif r == 5: # done, skip remaining
1134 elif r == 5: # done, skip remaining
1135 ret = skipall = False
1135 ret = skipall = False
1136 elif r == 6: # all
1136 elif r == 6: # all
1137 ret = skipall = True
1137 ret = skipall = True
1138 elif r == 7: # quit
1138 elif r == 7: # quit
1139 raise error.Abort(_('user quit'))
1139 raise error.Abort(_('user quit'))
1140 return ret, skipfile, skipall, newpatches
1140 return ret, skipfile, skipall, newpatches
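# Editorial note (derived from the prompt strings above, not part of the
# original source): ui.promptchoice returns the 0-based index of the chosen
# '$$'-separated option, so the numeric branches above line up with the keys
# in '[Ynesfdaq?]' order -- 0=yes, 1=no, 2=edit, 3=skip file, 4=file,
# 5=done, 6=all, 7=quit, 8=help:
#   >>> 'Ynesfdaq?'.index('q'), 'Ynesfdaq?'.index('?')
#   (7, 8)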
1141
1141
1142 seen = set()
1142 seen = set()
1143 applied = {} # 'filename' -> [] of chunks
1143 applied = {} # 'filename' -> [] of chunks
1144 skipfile, skipall = None, None
1144 skipfile, skipall = None, None
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1146 for h in headers:
1146 for h in headers:
1147 pos += len(h.hunks)
1147 pos += len(h.hunks)
1148 skipfile = None
1148 skipfile = None
1149 fixoffset = 0
1149 fixoffset = 0
1150 hdr = ''.join(h.header)
1150 hdr = ''.join(h.header)
1151 if hdr in seen:
1151 if hdr in seen:
1152 continue
1152 continue
1153 seen.add(hdr)
1153 seen.add(hdr)
1154 if skipall is None:
1154 if skipall is None:
1155 h.pretty(ui)
1155 h.pretty(ui)
1156 msg = (_('examine changes to %s?') %
1156 msg = (_('examine changes to %s?') %
1157 _(' and ').join("'%s'" % f for f in h.files()))
1157 _(' and ').join("'%s'" % f for f in h.files()))
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1159 if not r:
1159 if not r:
1160 continue
1160 continue
1161 applied[h.filename()] = [h]
1161 applied[h.filename()] = [h]
1162 if h.allhunks():
1162 if h.allhunks():
1163 applied[h.filename()] += h.hunks
1163 applied[h.filename()] += h.hunks
1164 continue
1164 continue
1165 for i, chunk in enumerate(h.hunks):
1165 for i, chunk in enumerate(h.hunks):
1166 if skipfile is None and skipall is None:
1166 if skipfile is None and skipall is None:
1167 chunk.pretty(ui)
1167 chunk.pretty(ui)
1168 if total == 1:
1168 if total == 1:
1169 msg = messages['single'] % chunk.filename()
1169 msg = messages['single'] % chunk.filename()
1170 else:
1170 else:
1171 idx = pos - len(h.hunks) + i
1171 idx = pos - len(h.hunks) + i
1172 msg = messages['multiple'] % (idx, total, chunk.filename())
1172 msg = messages['multiple'] % (idx, total, chunk.filename())
1173 r, skipfile, skipall, newpatches = prompt(skipfile,
1173 r, skipfile, skipall, newpatches = prompt(skipfile,
1174 skipall, msg, chunk)
1174 skipall, msg, chunk)
1175 if r:
1175 if r:
1176 if fixoffset:
1176 if fixoffset:
1177 chunk = copy.copy(chunk)
1177 chunk = copy.copy(chunk)
1178 chunk.toline += fixoffset
1178 chunk.toline += fixoffset
1179 applied[chunk.filename()].append(chunk)
1179 applied[chunk.filename()].append(chunk)
1180 elif newpatches is not None:
1180 elif newpatches is not None:
1181 for newpatch in newpatches:
1181 for newpatch in newpatches:
1182 for newhunk in newpatch.hunks:
1182 for newhunk in newpatch.hunks:
1183 if fixoffset:
1183 if fixoffset:
1184 newhunk.toline += fixoffset
1184 newhunk.toline += fixoffset
1185 applied[newhunk.filename()].append(newhunk)
1185 applied[newhunk.filename()].append(newhunk)
1186 else:
1186 else:
1187 fixoffset += chunk.removed - chunk.added
1187 fixoffset += chunk.removed - chunk.added
1188 return (sum([h for h in applied.itervalues()
1188 return (sum([h for h in applied.itervalues()
1189 if h[0].special() or len(h) > 1], []), {})
1189 if h[0].special() or len(h) > 1], []), {})
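# Editorial sketch of the fixoffset bookkeeping above (illustrative only):
# skipping a chunk leaves the file longer or shorter than the patch expected,
# so each later accepted chunk has its target line shifted by the accumulated
# (removed - added) of the skipped chunks. For a skipped chunk that removed
# two lines and added one:
#   >>> fixoffset = 2 - 1
#   >>> toline = 40                    # next accepted chunk, as parsed
#   >>> toline + fixoffset
#   41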
1190 class hunk(object):
1190 class hunk(object):
1191 def __init__(self, desc, num, lr, context):
1191 def __init__(self, desc, num, lr, context):
1192 self.number = num
1192 self.number = num
1193 self.desc = desc
1193 self.desc = desc
1194 self.hunk = [desc]
1194 self.hunk = [desc]
1195 self.a = []
1195 self.a = []
1196 self.b = []
1196 self.b = []
1197 self.starta = self.lena = None
1197 self.starta = self.lena = None
1198 self.startb = self.lenb = None
1198 self.startb = self.lenb = None
1199 if lr is not None:
1199 if lr is not None:
1200 if context:
1200 if context:
1201 self.read_context_hunk(lr)
1201 self.read_context_hunk(lr)
1202 else:
1202 else:
1203 self.read_unified_hunk(lr)
1203 self.read_unified_hunk(lr)
1204
1204
1205 def getnormalized(self):
1205 def getnormalized(self):
1206 """Return a copy with line endings normalized to LF."""
1206 """Return a copy with line endings normalized to LF."""
1207
1207
1208 def normalize(lines):
1208 def normalize(lines):
1209 nlines = []
1209 nlines = []
1210 for line in lines:
1210 for line in lines:
1211 if line.endswith('\r\n'):
1211 if line.endswith('\r\n'):
1212 line = line[:-2] + '\n'
1212 line = line[:-2] + '\n'
1213 nlines.append(line)
1213 nlines.append(line)
1214 return nlines
1214 return nlines
1215
1215
1216 # Dummy object, it is rebuilt manually
1216 # Dummy object, it is rebuilt manually
1217 nh = hunk(self.desc, self.number, None, None)
1217 nh = hunk(self.desc, self.number, None, None)
1218 nh.number = self.number
1218 nh.number = self.number
1219 nh.desc = self.desc
1219 nh.desc = self.desc
1220 nh.hunk = self.hunk
1220 nh.hunk = self.hunk
1221 nh.a = normalize(self.a)
1221 nh.a = normalize(self.a)
1222 nh.b = normalize(self.b)
1222 nh.b = normalize(self.b)
1223 nh.starta = self.starta
1223 nh.starta = self.starta
1224 nh.startb = self.startb
1224 nh.startb = self.startb
1225 nh.lena = self.lena
1225 nh.lena = self.lena
1226 nh.lenb = self.lenb
1226 nh.lenb = self.lenb
1227 return nh
1227 return nh
1228
1228
1229 def read_unified_hunk(self, lr):
1229 def read_unified_hunk(self, lr):
1230 m = unidesc.match(self.desc)
1230 m = unidesc.match(self.desc)
1231 if not m:
1231 if not m:
1232 raise PatchError(_("bad hunk #%d") % self.number)
1232 raise PatchError(_("bad hunk #%d") % self.number)
1233 self.starta, self.lena, self.startb, self.lenb = m.groups()
1233 self.starta, self.lena, self.startb, self.lenb = m.groups()
1234 if self.lena is None:
1234 if self.lena is None:
1235 self.lena = 1
1235 self.lena = 1
1236 else:
1236 else:
1237 self.lena = int(self.lena)
1237 self.lena = int(self.lena)
1238 if self.lenb is None:
1238 if self.lenb is None:
1239 self.lenb = 1
1239 self.lenb = 1
1240 else:
1240 else:
1241 self.lenb = int(self.lenb)
1241 self.lenb = int(self.lenb)
1242 self.starta = int(self.starta)
1242 self.starta = int(self.starta)
1243 self.startb = int(self.startb)
1243 self.startb = int(self.startb)
1244 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1244 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1245 self.b)
1245 self.b)
1246 # if we hit eof before finishing out the hunk, the last line will
1246 # if we hit eof before finishing out the hunk, the last line will
1247 # be zero length. Let's try to fix it up.
1247 # be zero length. Let's try to fix it up.
1248 while len(self.hunk[-1]) == 0:
1248 while len(self.hunk[-1]) == 0:
1249 del self.hunk[-1]
1249 del self.hunk[-1]
1250 del self.a[-1]
1250 del self.a[-1]
1251 del self.b[-1]
1251 del self.b[-1]
1252 self.lena -= 1
1252 self.lena -= 1
1253 self.lenb -= 1
1253 self.lenb -= 1
1254 self._fixnewline(lr)
1254 self._fixnewline(lr)
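# Editorial sketch (unidesc is defined earlier in this file; the group order
# below follows the assignment above and is otherwise an assumption): a header
# such as "@@ -1,3 +1,4 @@" yields starta=1, lena=3, startb=1, lenb=4, while a
# short form like "@@ -1 +1 @@" leaves the lengths unmatched so they default
# to 1:
#   >>> groups = ('1', None, '1', None)    # hypothetical m.groups() result
#   >>> [int(g) if g is not None else 1 for g in groups]
#   [1, 1, 1, 1]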
1255
1255
1256 def read_context_hunk(self, lr):
1256 def read_context_hunk(self, lr):
1257 self.desc = lr.readline()
1257 self.desc = lr.readline()
1258 m = contextdesc.match(self.desc)
1258 m = contextdesc.match(self.desc)
1259 if not m:
1259 if not m:
1260 raise PatchError(_("bad hunk #%d") % self.number)
1260 raise PatchError(_("bad hunk #%d") % self.number)
1261 self.starta, aend = m.groups()
1261 self.starta, aend = m.groups()
1262 self.starta = int(self.starta)
1262 self.starta = int(self.starta)
1263 if aend is None:
1263 if aend is None:
1264 aend = self.starta
1264 aend = self.starta
1265 self.lena = int(aend) - self.starta
1265 self.lena = int(aend) - self.starta
1266 if self.starta:
1266 if self.starta:
1267 self.lena += 1
1267 self.lena += 1
1268 for x in xrange(self.lena):
1268 for x in xrange(self.lena):
1269 l = lr.readline()
1269 l = lr.readline()
1270 if l.startswith('---'):
1270 if l.startswith('---'):
1271 # lines addition, old block is empty
1271 # lines addition, old block is empty
1272 lr.push(l)
1272 lr.push(l)
1273 break
1273 break
1274 s = l[2:]
1274 s = l[2:]
1275 if l.startswith('- ') or l.startswith('! '):
1275 if l.startswith('- ') or l.startswith('! '):
1276 u = '-' + s
1276 u = '-' + s
1277 elif l.startswith(' '):
1277 elif l.startswith(' '):
1278 u = ' ' + s
1278 u = ' ' + s
1279 else:
1279 else:
1280 raise PatchError(_("bad hunk #%d old text line %d") %
1280 raise PatchError(_("bad hunk #%d old text line %d") %
1281 (self.number, x))
1281 (self.number, x))
1282 self.a.append(u)
1282 self.a.append(u)
1283 self.hunk.append(u)
1283 self.hunk.append(u)
1284
1284
1285 l = lr.readline()
1285 l = lr.readline()
1286 if l.startswith('\ '):
1286 if l.startswith('\ '):
1287 s = self.a[-1][:-1]
1287 s = self.a[-1][:-1]
1288 self.a[-1] = s
1288 self.a[-1] = s
1289 self.hunk[-1] = s
1289 self.hunk[-1] = s
1290 l = lr.readline()
1290 l = lr.readline()
1291 m = contextdesc.match(l)
1291 m = contextdesc.match(l)
1292 if not m:
1292 if not m:
1293 raise PatchError(_("bad hunk #%d") % self.number)
1293 raise PatchError(_("bad hunk #%d") % self.number)
1294 self.startb, bend = m.groups()
1294 self.startb, bend = m.groups()
1295 self.startb = int(self.startb)
1295 self.startb = int(self.startb)
1296 if bend is None:
1296 if bend is None:
1297 bend = self.startb
1297 bend = self.startb
1298 self.lenb = int(bend) - self.startb
1298 self.lenb = int(bend) - self.startb
1299 if self.startb:
1299 if self.startb:
1300 self.lenb += 1
1300 self.lenb += 1
1301 hunki = 1
1301 hunki = 1
1302 for x in xrange(self.lenb):
1302 for x in xrange(self.lenb):
1303 l = lr.readline()
1303 l = lr.readline()
1304 if l.startswith('\ '):
1304 if l.startswith('\ '):
1305 # XXX: the only way to hit this is with an invalid line range.
1305 # XXX: the only way to hit this is with an invalid line range.
1306 # The no-eol marker is not counted in the line range, but I
1306 # The no-eol marker is not counted in the line range, but I
1307 # guess there are diff(1) implementations out there which behave differently.
1307 # guess there are diff(1) implementations out there which behave differently.
1308 s = self.b[-1][:-1]
1308 s = self.b[-1][:-1]
1309 self.b[-1] = s
1309 self.b[-1] = s
1310 self.hunk[hunki - 1] = s
1310 self.hunk[hunki - 1] = s
1311 continue
1311 continue
1312 if not l:
1312 if not l:
1313 # line deletions, new block is empty and we hit EOF
1313 # line deletions, new block is empty and we hit EOF
1314 lr.push(l)
1314 lr.push(l)
1315 break
1315 break
1316 s = l[2:]
1316 s = l[2:]
1317 if l.startswith('+ ') or l.startswith('! '):
1317 if l.startswith('+ ') or l.startswith('! '):
1318 u = '+' + s
1318 u = '+' + s
1319 elif l.startswith(' '):
1319 elif l.startswith(' '):
1320 u = ' ' + s
1320 u = ' ' + s
1321 elif len(self.b) == 0:
1321 elif len(self.b) == 0:
1322 # line deletions, new block is empty
1322 # line deletions, new block is empty
1323 lr.push(l)
1323 lr.push(l)
1324 break
1324 break
1325 else:
1325 else:
1326 raise PatchError(_("bad hunk #%d old text line %d") %
1326 raise PatchError(_("bad hunk #%d old text line %d") %
1327 (self.number, x))
1327 (self.number, x))
1328 self.b.append(s)
1328 self.b.append(s)
1329 while True:
1329 while True:
1330 if hunki >= len(self.hunk):
1330 if hunki >= len(self.hunk):
1331 h = ""
1331 h = ""
1332 else:
1332 else:
1333 h = self.hunk[hunki]
1333 h = self.hunk[hunki]
1334 hunki += 1
1334 hunki += 1
1335 if h == u:
1335 if h == u:
1336 break
1336 break
1337 elif h.startswith('-'):
1337 elif h.startswith('-'):
1338 continue
1338 continue
1339 else:
1339 else:
1340 self.hunk.insert(hunki - 1, u)
1340 self.hunk.insert(hunki - 1, u)
1341 break
1341 break
1342
1342
1343 if not self.a:
1343 if not self.a:
1344 # this happens when lines were only added to the hunk
1344 # this happens when lines were only added to the hunk
1345 for x in self.hunk:
1345 for x in self.hunk:
1346 if x.startswith('-') or x.startswith(' '):
1346 if x.startswith('-') or x.startswith(' '):
1347 self.a.append(x)
1347 self.a.append(x)
1348 if not self.b:
1348 if not self.b:
1349 # this happens when lines were only deleted from the hunk
1349 # this happens when lines were only deleted from the hunk
1350 for x in self.hunk:
1350 for x in self.hunk:
1351 if x.startswith('+') or x.startswith(' '):
1351 if x.startswith('+') or x.startswith(' '):
1352 self.b.append(x[1:])
1352 self.b.append(x[1:])
1353 # @@ -start,len +start,len @@
1353 # @@ -start,len +start,len @@
1354 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1354 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1355 self.startb, self.lenb)
1355 self.startb, self.lenb)
1356 self.hunk[0] = self.desc
1356 self.hunk[0] = self.desc
1357 self._fixnewline(lr)
1357 self._fixnewline(lr)
1358
1358
1359 def _fixnewline(self, lr):
1359 def _fixnewline(self, lr):
1360 l = lr.readline()
1360 l = lr.readline()
1361 if l.startswith('\ '):
1361 if l.startswith('\ '):
1362 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1362 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1363 else:
1363 else:
1364 lr.push(l)
1364 lr.push(l)
1365
1365
1366 def complete(self):
1366 def complete(self):
1367 return len(self.a) == self.lena and len(self.b) == self.lenb
1367 return len(self.a) == self.lena and len(self.b) == self.lenb
1368
1368
1369 def _fuzzit(self, old, new, fuzz, toponly):
1369 def _fuzzit(self, old, new, fuzz, toponly):
1370 # this removes context lines from the top and bottom of list 'l'. It
1370 # this removes context lines from the top and bottom of list 'l'. It
1371 # checks the hunk to make sure only context lines are removed, and then
1371 # checks the hunk to make sure only context lines are removed, and then
1372 # returns a new shortened list of lines.
1372 # returns a new shortened list of lines.
1373 fuzz = min(fuzz, len(old))
1373 fuzz = min(fuzz, len(old))
1374 if fuzz:
1374 if fuzz:
1375 top = 0
1375 top = 0
1376 bot = 0
1376 bot = 0
1377 hlen = len(self.hunk)
1377 hlen = len(self.hunk)
1378 for x in xrange(hlen - 1):
1378 for x in xrange(hlen - 1):
1379 # the hunk starts with the @@ line, so use x+1
1379 # the hunk starts with the @@ line, so use x+1
1380 if self.hunk[x + 1][0] == ' ':
1380 if self.hunk[x + 1][0] == ' ':
1381 top += 1
1381 top += 1
1382 else:
1382 else:
1383 break
1383 break
1384 if not toponly:
1384 if not toponly:
1385 for x in xrange(hlen - 1):
1385 for x in xrange(hlen - 1):
1386 if self.hunk[hlen - bot - 1][0] == ' ':
1386 if self.hunk[hlen - bot - 1][0] == ' ':
1387 bot += 1
1387 bot += 1
1388 else:
1388 else:
1389 break
1389 break
1390
1390
1391 bot = min(fuzz, bot)
1391 bot = min(fuzz, bot)
1392 top = min(fuzz, top)
1392 top = min(fuzz, top)
1393 return old[top:len(old) - bot], new[top:len(new) - bot], top
1393 return old[top:len(old) - bot], new[top:len(new) - bot], top
1394 return old, new, 0
1394 return old, new, 0
1395
1395
1396 def fuzzit(self, fuzz, toponly):
1396 def fuzzit(self, fuzz, toponly):
1397 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1397 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1398 oldstart = self.starta + top
1398 oldstart = self.starta + top
1399 newstart = self.startb + top
1399 newstart = self.startb + top
1400 # zero length hunk ranges already have their start decremented
1400 # zero length hunk ranges already have their start decremented
1401 if self.lena and oldstart > 0:
1401 if self.lena and oldstart > 0:
1402 oldstart -= 1
1402 oldstart -= 1
1403 if self.lenb and newstart > 0:
1403 if self.lenb and newstart > 0:
1404 newstart -= 1
1404 newstart -= 1
1405 return old, oldstart, new, newstart
1405 return old, oldstart, new, newstart
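# Editorial sketch (illustrative only): fuzzing trims up to `fuzz` leading
# context lines (and, unless toponly, up to `fuzz` trailing ones) and then
# shifts the reported start lines by the amount trimmed from the top. E.g.
# trimming two of three leading context lines:
#   >>> old = [' a\n', ' b\n', ' c\n', '-x\n', '+y\n']
#   >>> top, bot = 2, 0
#   >>> old[top:len(old) - bot]
#   [' c\n', '-x\n', '+y\n']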
1406
1406
1407 class binhunk(object):
1407 class binhunk(object):
1408 'A binary patch file.'
1408 'A binary patch file.'
1409 def __init__(self, lr, fname):
1409 def __init__(self, lr, fname):
1410 self.text = None
1410 self.text = None
1411 self.delta = False
1411 self.delta = False
1412 self.hunk = ['GIT binary patch\n']
1412 self.hunk = ['GIT binary patch\n']
1413 self._fname = fname
1413 self._fname = fname
1414 self._read(lr)
1414 self._read(lr)
1415
1415
1416 def complete(self):
1416 def complete(self):
1417 return self.text is not None
1417 return self.text is not None
1418
1418
1419 def new(self, lines):
1419 def new(self, lines):
1420 if self.delta:
1420 if self.delta:
1421 return [applybindelta(self.text, ''.join(lines))]
1421 return [applybindelta(self.text, ''.join(lines))]
1422 return [self.text]
1422 return [self.text]
1423
1423
1424 def _read(self, lr):
1424 def _read(self, lr):
1425 def getline(lr, hunk):
1425 def getline(lr, hunk):
1426 l = lr.readline()
1426 l = lr.readline()
1427 hunk.append(l)
1427 hunk.append(l)
1428 return l.rstrip('\r\n')
1428 return l.rstrip('\r\n')
1429
1429
1430 size = 0
1430 size = 0
1431 while True:
1431 while True:
1432 line = getline(lr, self.hunk)
1432 line = getline(lr, self.hunk)
1433 if not line:
1433 if not line:
1434 raise PatchError(_('could not extract "%s" binary data')
1434 raise PatchError(_('could not extract "%s" binary data')
1435 % self._fname)
1435 % self._fname)
1436 if line.startswith('literal '):
1436 if line.startswith('literal '):
1437 size = int(line[8:].rstrip())
1437 size = int(line[8:].rstrip())
1438 break
1438 break
1439 if line.startswith('delta '):
1439 if line.startswith('delta '):
1440 size = int(line[6:].rstrip())
1440 size = int(line[6:].rstrip())
1441 self.delta = True
1441 self.delta = True
1442 break
1442 break
1443 dec = []
1443 dec = []
1444 line = getline(lr, self.hunk)
1444 line = getline(lr, self.hunk)
1445 while len(line) > 1:
1445 while len(line) > 1:
1446 l = line[0]
1446 l = line[0]
1447 if l <= 'Z' and l >= 'A':
1447 if l <= 'Z' and l >= 'A':
1448 l = ord(l) - ord('A') + 1
1448 l = ord(l) - ord('A') + 1
1449 else:
1449 else:
1450 l = ord(l) - ord('a') + 27
1450 l = ord(l) - ord('a') + 27
1451 try:
1451 try:
1452 dec.append(util.b85decode(line[1:])[:l])
1452 dec.append(util.b85decode(line[1:])[:l])
1453 except ValueError as e:
1453 except ValueError as e:
1454 raise PatchError(_('could not decode "%s" binary patch: %s')
1454 raise PatchError(_('could not decode "%s" binary patch: %s')
1455 % (self._fname, str(e)))
1455 % (self._fname, str(e)))
1456 line = getline(lr, self.hunk)
1456 line = getline(lr, self.hunk)
1457 text = zlib.decompress(''.join(dec))
1457 text = zlib.decompress(''.join(dec))
1458 if len(text) != size:
1458 if len(text) != size:
1459 raise PatchError(_('"%s" length is %d bytes, should be %d')
1459 raise PatchError(_('"%s" length is %d bytes, should be %d')
1460 % (self._fname, len(text), size))
1460 % (self._fname, len(text), size))
1461 self.text = text
1461 self.text = text
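# Editorial note (derived from the decoding loop above): every base85 line of
# a "GIT binary patch" hunk starts with one character giving the number of
# decoded bytes it carries, 'A'-'Z' for 1-26 and 'a'-'z' for 27-52:
#   >>> ord('A') - ord('A') + 1, ord('Z') - ord('A') + 1, ord('a') - ord('a') + 27
#   (1, 26, 27)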
1462
1462
1463 def parsefilename(str):
1463 def parsefilename(str):
1464 # --- filename \t|space stuff
1464 # --- filename \t|space stuff
1465 s = str[4:].rstrip('\r\n')
1465 s = str[4:].rstrip('\r\n')
1466 i = s.find('\t')
1466 i = s.find('\t')
1467 if i < 0:
1467 if i < 0:
1468 i = s.find(' ')
1468 i = s.find(' ')
1469 if i < 0:
1469 if i < 0:
1470 return s
1470 return s
1471 return s[:i]
1471 return s[:i]
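# Editorial sketch (illustrative only): parsefilename drops the "--- "/"+++ "
# prefix and keeps everything up to the first tab or space, so a timestamped
# header reduces to the bare path:
#   >>> line = '--- a/folder1/g\t2017-01-01 00:00:00\n'
#   >>> s = line[4:].rstrip('\r\n')
#   >>> s[:s.find('\t')]
#   'a/folder1/g'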
1472
1472
1473 def reversehunks(hunks):
1473 def reversehunks(hunks):
1474 '''reverse the signs in the hunks given as argument
1474 '''reverse the signs in the hunks given as argument
1475
1475
1476 This function operates on hunks coming out of patch.filterpatch, that is
1476 This function operates on hunks coming out of patch.filterpatch, that is
1477 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1477 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1478
1478
1479 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1479 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1480 ... --- a/folder1/g
1480 ... --- a/folder1/g
1481 ... +++ b/folder1/g
1481 ... +++ b/folder1/g
1482 ... @@ -1,7 +1,7 @@
1482 ... @@ -1,7 +1,7 @@
1483 ... +firstline
1483 ... +firstline
1484 ... c
1484 ... c
1485 ... 1
1485 ... 1
1486 ... 2
1486 ... 2
1487 ... + 3
1487 ... + 3
1488 ... -4
1488 ... -4
1489 ... 5
1489 ... 5
1490 ... d
1490 ... d
1491 ... +lastline"""
1491 ... +lastline"""
1492 >>> hunks = parsepatch(rawpatch)
1492 >>> hunks = parsepatch(rawpatch)
1493 >>> hunkscomingfromfilterpatch = []
1493 >>> hunkscomingfromfilterpatch = []
1494 >>> for h in hunks:
1494 >>> for h in hunks:
1495 ... hunkscomingfromfilterpatch.append(h)
1495 ... hunkscomingfromfilterpatch.append(h)
1496 ... hunkscomingfromfilterpatch.extend(h.hunks)
1496 ... hunkscomingfromfilterpatch.extend(h.hunks)
1497
1497
1498 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1498 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1499 >>> from . import util
1499 >>> from . import util
1500 >>> fp = util.stringio()
1500 >>> fp = util.stringio()
1501 >>> for c in reversedhunks:
1501 >>> for c in reversedhunks:
1502 ... c.write(fp)
1502 ... c.write(fp)
1503 >>> fp.seek(0)
1503 >>> fp.seek(0)
1504 >>> reversedpatch = fp.read()
1504 >>> reversedpatch = fp.read()
1505 >>> print reversedpatch
1505 >>> print reversedpatch
1506 diff --git a/folder1/g b/folder1/g
1506 diff --git a/folder1/g b/folder1/g
1507 --- a/folder1/g
1507 --- a/folder1/g
1508 +++ b/folder1/g
1508 +++ b/folder1/g
1509 @@ -1,4 +1,3 @@
1509 @@ -1,4 +1,3 @@
1510 -firstline
1510 -firstline
1511 c
1511 c
1512 1
1512 1
1513 2
1513 2
1514 @@ -2,6 +1,6 @@
1514 @@ -2,6 +1,6 @@
1515 c
1515 c
1516 1
1516 1
1517 2
1517 2
1518 - 3
1518 - 3
1519 +4
1519 +4
1520 5
1520 5
1521 d
1521 d
1522 @@ -6,3 +5,2 @@
1522 @@ -6,3 +5,2 @@
1523 5
1523 5
1524 d
1524 d
1525 -lastline
1525 -lastline
1526
1526
1527 '''
1527 '''
1528
1528
1529 newhunks = []
1529 newhunks = []
1530 for c in hunks:
1530 for c in hunks:
1531 if util.safehasattr(c, 'reversehunk'):
1531 if util.safehasattr(c, 'reversehunk'):
1532 c = c.reversehunk()
1532 c = c.reversehunk()
1533 newhunks.append(c)
1533 newhunks.append(c)
1534 return newhunks
1534 return newhunks
1535
1535
1536 def parsepatch(originalchunks, maxcontext=None):
1536 def parsepatch(originalchunks, maxcontext=None):
1537 """patch -> [] of headers -> [] of hunks
1537 """patch -> [] of headers -> [] of hunks
1538
1538
1539 If maxcontext is not None, trim context lines if necessary.
1539 If maxcontext is not None, trim context lines if necessary.
1540
1540
1541 >>> rawpatch = '''diff --git a/folder1/g b/folder1/g
1541 >>> rawpatch = '''diff --git a/folder1/g b/folder1/g
1542 ... --- a/folder1/g
1542 ... --- a/folder1/g
1543 ... +++ b/folder1/g
1543 ... +++ b/folder1/g
1544 ... @@ -1,8 +1,10 @@
1544 ... @@ -1,8 +1,10 @@
1545 ... 1
1545 ... 1
1546 ... 2
1546 ... 2
1547 ... -3
1547 ... -3
1548 ... 4
1548 ... 4
1549 ... 5
1549 ... 5
1550 ... 6
1550 ... 6
1551 ... +6.1
1551 ... +6.1
1552 ... +6.2
1552 ... +6.2
1553 ... 7
1553 ... 7
1554 ... 8
1554 ... 8
1555 ... +9'''
1555 ... +9'''
1556 >>> out = util.stringio()
1556 >>> out = util.stringio()
1557 >>> headers = parsepatch([rawpatch], maxcontext=1)
1557 >>> headers = parsepatch([rawpatch], maxcontext=1)
1558 >>> for header in headers:
1558 >>> for header in headers:
1559 ... header.write(out)
1559 ... header.write(out)
1560 ... for hunk in header.hunks:
1560 ... for hunk in header.hunks:
1561 ... hunk.write(out)
1561 ... hunk.write(out)
1562 >>> print(out.getvalue())
1562 >>> print(out.getvalue())
1563 diff --git a/folder1/g b/folder1/g
1563 diff --git a/folder1/g b/folder1/g
1564 --- a/folder1/g
1564 --- a/folder1/g
1565 +++ b/folder1/g
1565 +++ b/folder1/g
1566 @@ -2,3 +2,2 @@
1566 @@ -2,3 +2,2 @@
1567 2
1567 2
1568 -3
1568 -3
1569 4
1569 4
1570 @@ -6,2 +5,4 @@
1570 @@ -6,2 +5,4 @@
1571 6
1571 6
1572 +6.1
1572 +6.1
1573 +6.2
1573 +6.2
1574 7
1574 7
1575 @@ -8,1 +9,2 @@
1575 @@ -8,1 +9,2 @@
1576 8
1576 8
1577 +9
1577 +9
1578 """
1578 """
1579 class parser(object):
1579 class parser(object):
1580 """patch parsing state machine"""
1580 """patch parsing state machine"""
1581 def __init__(self):
1581 def __init__(self):
1582 self.fromline = 0
1582 self.fromline = 0
1583 self.toline = 0
1583 self.toline = 0
1584 self.proc = ''
1584 self.proc = ''
1585 self.header = None
1585 self.header = None
1586 self.context = []
1586 self.context = []
1587 self.before = []
1587 self.before = []
1588 self.hunk = []
1588 self.hunk = []
1589 self.headers = []
1589 self.headers = []
1590
1590
1591 def addrange(self, limits):
1591 def addrange(self, limits):
1592 fromstart, fromend, tostart, toend, proc = limits
1592 fromstart, fromend, tostart, toend, proc = limits
1593 self.fromline = int(fromstart)
1593 self.fromline = int(fromstart)
1594 self.toline = int(tostart)
1594 self.toline = int(tostart)
1595 self.proc = proc
1595 self.proc = proc
1596
1596
1597 def addcontext(self, context):
1597 def addcontext(self, context):
1598 if self.hunk:
1598 if self.hunk:
1599 h = recordhunk(self.header, self.fromline, self.toline,
1599 h = recordhunk(self.header, self.fromline, self.toline,
1600 self.proc, self.before, self.hunk, context, maxcontext)
1600 self.proc, self.before, self.hunk, context, maxcontext)
1601 self.header.hunks.append(h)
1601 self.header.hunks.append(h)
1602 self.fromline += len(self.before) + h.removed
1602 self.fromline += len(self.before) + h.removed
1603 self.toline += len(self.before) + h.added
1603 self.toline += len(self.before) + h.added
1604 self.before = []
1604 self.before = []
1605 self.hunk = []
1605 self.hunk = []
1606 self.context = context
1606 self.context = context
1607
1607
1608 def addhunk(self, hunk):
1608 def addhunk(self, hunk):
1609 if self.context:
1609 if self.context:
1610 self.before = self.context
1610 self.before = self.context
1611 self.context = []
1611 self.context = []
1612 self.hunk = hunk
1612 self.hunk = hunk
1613
1613
1614 def newfile(self, hdr):
1614 def newfile(self, hdr):
1615 self.addcontext([])
1615 self.addcontext([])
1616 h = header(hdr)
1616 h = header(hdr)
1617 self.headers.append(h)
1617 self.headers.append(h)
1618 self.header = h
1618 self.header = h
1619
1619
1620 def addother(self, line):
1620 def addother(self, line):
1621 pass # 'other' lines are ignored
1621 pass # 'other' lines are ignored
1622
1622
1623 def finished(self):
1623 def finished(self):
1624 self.addcontext([])
1624 self.addcontext([])
1625 return self.headers
1625 return self.headers
1626
1626
1627 transitions = {
1627 transitions = {
1628 'file': {'context': addcontext,
1628 'file': {'context': addcontext,
1629 'file': newfile,
1629 'file': newfile,
1630 'hunk': addhunk,
1630 'hunk': addhunk,
1631 'range': addrange},
1631 'range': addrange},
1632 'context': {'file': newfile,
1632 'context': {'file': newfile,
1633 'hunk': addhunk,
1633 'hunk': addhunk,
1634 'range': addrange,
1634 'range': addrange,
1635 'other': addother},
1635 'other': addother},
1636 'hunk': {'context': addcontext,
1636 'hunk': {'context': addcontext,
1637 'file': newfile,
1637 'file': newfile,
1638 'range': addrange},
1638 'range': addrange},
1639 'range': {'context': addcontext,
1639 'range': {'context': addcontext,
1640 'hunk': addhunk},
1640 'hunk': addhunk},
1641 'other': {'other': addother},
1641 'other': {'other': addother},
1642 }
1642 }
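# Editorial note (not part of the original source): scanpatch drives this
# table with (current state, new event) pairs; a minimal one-file patch
# typically walks file -> range -> hunk -> context from the initial 'context'
# state, and any pair missing from the table is reported below as an
# unhandled transition:
#   >>> events = ['file', 'range', 'hunk', 'context']
#   >>> list(zip(['context'] + events, events))
#   [('context', 'file'), ('file', 'range'), ('range', 'hunk'), ('hunk', 'context')]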
1643
1643
1644 p = parser()
1644 p = parser()
1645 fp = stringio()
1645 fp = stringio()
1646 fp.write(''.join(originalchunks))
1646 fp.write(''.join(originalchunks))
1647 fp.seek(0)
1647 fp.seek(0)
1648
1648
1649 state = 'context'
1649 state = 'context'
1650 for newstate, data in scanpatch(fp):
1650 for newstate, data in scanpatch(fp):
1651 try:
1651 try:
1652 p.transitions[state][newstate](p, data)
1652 p.transitions[state][newstate](p, data)
1653 except KeyError:
1653 except KeyError:
1654 raise PatchError('unhandled transition: %s -> %s' %
1654 raise PatchError('unhandled transition: %s -> %s' %
1655 (state, newstate))
1655 (state, newstate))
1656 state = newstate
1656 state = newstate
1657 del fp
1657 del fp
1658 return p.finished()
1658 return p.finished()
1659
1659
1660 def pathtransform(path, strip, prefix):
1660 def pathtransform(path, strip, prefix):
1661 '''turn a path from a patch into a path suitable for the repository
1661 '''turn a path from a patch into a path suitable for the repository
1662
1662
1663 prefix, if not empty, is expected to be normalized with a / at the end.
1663 prefix, if not empty, is expected to be normalized with a / at the end.
1664
1664
1665 Returns (stripped components, path in repository).
1665 Returns (stripped components, path in repository).
1666
1666
1667 >>> pathtransform('a/b/c', 0, '')
1667 >>> pathtransform('a/b/c', 0, '')
1668 ('', 'a/b/c')
1668 ('', 'a/b/c')
1669 >>> pathtransform(' a/b/c ', 0, '')
1669 >>> pathtransform(' a/b/c ', 0, '')
1670 ('', ' a/b/c')
1670 ('', ' a/b/c')
1671 >>> pathtransform(' a/b/c ', 2, '')
1671 >>> pathtransform(' a/b/c ', 2, '')
1672 ('a/b/', 'c')
1672 ('a/b/', 'c')
1673 >>> pathtransform('a/b/c', 0, 'd/e/')
1673 >>> pathtransform('a/b/c', 0, 'd/e/')
1674 ('', 'd/e/a/b/c')
1674 ('', 'd/e/a/b/c')
1675 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1675 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1676 ('a//b/', 'd/e/c')
1676 ('a//b/', 'd/e/c')
1677 >>> pathtransform('a/b/c', 3, '')
1677 >>> pathtransform('a/b/c', 3, '')
1678 Traceback (most recent call last):
1678 Traceback (most recent call last):
1679 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1679 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1680 '''
1680 '''
1681 pathlen = len(path)
1681 pathlen = len(path)
1682 i = 0
1682 i = 0
1683 if strip == 0:
1683 if strip == 0:
1684 return '', prefix + path.rstrip()
1684 return '', prefix + path.rstrip()
1685 count = strip
1685 count = strip
1686 while count > 0:
1686 while count > 0:
1687 i = path.find('/', i)
1687 i = path.find('/', i)
1688 if i == -1:
1688 if i == -1:
1689 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1689 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1690 (count, strip, path))
1690 (count, strip, path))
1691 i += 1
1691 i += 1
1692 # consume '//' in the path
1692 # consume '//' in the path
1693 while i < pathlen - 1 and path[i] == '/':
1693 while i < pathlen - 1 and path[i] == '/':
1694 i += 1
1694 i += 1
1695 count -= 1
1695 count -= 1
1696 return path[:i].lstrip(), prefix + path[i:].rstrip()
1696 return path[:i].lstrip(), prefix + path[i:].rstrip()
1697
1697
1698 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1698 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1699 nulla = afile_orig == "/dev/null"
1699 nulla = afile_orig == "/dev/null"
1700 nullb = bfile_orig == "/dev/null"
1700 nullb = bfile_orig == "/dev/null"
1701 create = nulla and hunk.starta == 0 and hunk.lena == 0
1701 create = nulla and hunk.starta == 0 and hunk.lena == 0
1702 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1702 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1703 abase, afile = pathtransform(afile_orig, strip, prefix)
1703 abase, afile = pathtransform(afile_orig, strip, prefix)
1704 gooda = not nulla and backend.exists(afile)
1704 gooda = not nulla and backend.exists(afile)
1705 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1705 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1706 if afile == bfile:
1706 if afile == bfile:
1707 goodb = gooda
1707 goodb = gooda
1708 else:
1708 else:
1709 goodb = not nullb and backend.exists(bfile)
1709 goodb = not nullb and backend.exists(bfile)
1710 missing = not goodb and not gooda and not create
1710 missing = not goodb and not gooda and not create
1711
1711
1712 # some diff programs apparently produce patches where the afile is
1712 # some diff programs apparently produce patches where the afile is
1713 # not /dev/null, but afile starts with bfile
1713 # not /dev/null, but afile starts with bfile
1714 abasedir = afile[:afile.rfind('/') + 1]
1714 abasedir = afile[:afile.rfind('/') + 1]
1715 bbasedir = bfile[:bfile.rfind('/') + 1]
1715 bbasedir = bfile[:bfile.rfind('/') + 1]
1716 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1716 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1717 and hunk.starta == 0 and hunk.lena == 0):
1717 and hunk.starta == 0 and hunk.lena == 0):
1718 create = True
1718 create = True
1719 missing = False
1719 missing = False
1720
1720
1721 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1721 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1722 # diff is between a file and its backup. In this case, the original
1722 # diff is between a file and its backup. In this case, the original
1723 # file should be patched (see original mpatch code).
1723 # file should be patched (see original mpatch code).
1724 isbackup = (abase == bbase and bfile.startswith(afile))
1724 isbackup = (abase == bbase and bfile.startswith(afile))
1725 fname = None
1725 fname = None
1726 if not missing:
1726 if not missing:
1727 if gooda and goodb:
1727 if gooda and goodb:
1728 if isbackup:
1728 if isbackup:
1729 fname = afile
1729 fname = afile
1730 else:
1730 else:
1731 fname = bfile
1731 fname = bfile
1732 elif gooda:
1732 elif gooda:
1733 fname = afile
1733 fname = afile
1734
1734
1735 if not fname:
1735 if not fname:
1736 if not nullb:
1736 if not nullb:
1737 if isbackup:
1737 if isbackup:
1738 fname = afile
1738 fname = afile
1739 else:
1739 else:
1740 fname = bfile
1740 fname = bfile
1741 elif not nulla:
1741 elif not nulla:
1742 fname = afile
1742 fname = afile
1743 else:
1743 else:
1744 raise PatchError(_("undefined source and destination files"))
1744 raise PatchError(_("undefined source and destination files"))
1745
1745
1746 gp = patchmeta(fname)
1746 gp = patchmeta(fname)
1747 if create:
1747 if create:
1748 gp.op = 'ADD'
1748 gp.op = 'ADD'
1749 elif remove:
1749 elif remove:
1750 gp.op = 'DELETE'
1750 gp.op = 'DELETE'
1751 return gp
1751 return gp
1752
1752
1753 def scanpatch(fp):
1753 def scanpatch(fp):
1754 """like patch.iterhunks, but yield different events
1754 """like patch.iterhunks, but yield different events
1755
1755
1756 - ('file', [header_lines + fromfile + tofile])
1756 - ('file', [header_lines + fromfile + tofile])
1757 - ('context', [context_lines])
1757 - ('context', [context_lines])
1758 - ('hunk', [hunk_lines])
1758 - ('hunk', [hunk_lines])
1759 - ('range', (-start,len, +start,len, proc))
1759 - ('range', (-start,len, +start,len, proc))
1760 """
1760 """
1761 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1761 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1762 lr = linereader(fp)
1762 lr = linereader(fp)
1763
1763
1764 def scanwhile(first, p):
1764 def scanwhile(first, p):
1765 """scan lr while predicate holds"""
1765 """scan lr while predicate holds"""
1766 lines = [first]
1766 lines = [first]
1767 for line in iter(lr.readline, ''):
1767 for line in iter(lr.readline, ''):
1768 if p(line):
1768 if p(line):
1769 lines.append(line)
1769 lines.append(line)
1770 else:
1770 else:
1771 lr.push(line)
1771 lr.push(line)
1772 break
1772 break
1773 return lines
1773 return lines
1774
1774
1775 for line in iter(lr.readline, ''):
1775 for line in iter(lr.readline, ''):
1776 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1776 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1777 def notheader(line):
1777 def notheader(line):
1778 s = line.split(None, 1)
1778 s = line.split(None, 1)
1779 return not s or s[0] not in ('---', 'diff')
1779 return not s or s[0] not in ('---', 'diff')
1780 header = scanwhile(line, notheader)
1780 header = scanwhile(line, notheader)
1781 fromfile = lr.readline()
1781 fromfile = lr.readline()
1782 if fromfile.startswith('---'):
1782 if fromfile.startswith('---'):
1783 tofile = lr.readline()
1783 tofile = lr.readline()
1784 header += [fromfile, tofile]
1784 header += [fromfile, tofile]
1785 else:
1785 else:
1786 lr.push(fromfile)
1786 lr.push(fromfile)
1787 yield 'file', header
1787 yield 'file', header
1788 elif line[0] == ' ':
1788 elif line[0] == ' ':
1789 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1789 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1790 elif line[0] in '-+':
1790 elif line[0] in '-+':
1791 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1791 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1792 else:
1792 else:
1793 m = lines_re.match(line)
1793 m = lines_re.match(line)
1794 if m:
1794 if m:
1795 yield 'range', m.groups()
1795 yield 'range', m.groups()
1796 else:
1796 else:
1797 yield 'other', line
1797 yield 'other', line
1798
1798
1799 def scangitpatch(lr, firstline):
1799 def scangitpatch(lr, firstline):
1800 """
1800 """
1801 Git patches can emit:
1801 Git patches can emit:
1802 - rename a to b
1802 - rename a to b
1803 - change b
1803 - change b
1804 - copy a to c
1804 - copy a to c
1805 - change c
1805 - change c
1806
1806
1807 We cannot apply this sequence as-is: the renamed 'a' could not be
1807 We cannot apply this sequence as-is: the renamed 'a' could not be
1808 found, as it would have been renamed already. And we cannot copy
1808 found, as it would have been renamed already. And we cannot copy
1809 from 'b' instead because 'b' would have been changed already. So
1809 from 'b' instead because 'b' would have been changed already. So
1810 we scan the git patch for copy and rename commands so we can
1810 we scan the git patch for copy and rename commands so we can
1811 perform the copies ahead of time.
1811 perform the copies ahead of time.
1812 """
1812 """
1813 pos = 0
1813 pos = 0
1814 try:
1814 try:
1815 pos = lr.fp.tell()
1815 pos = lr.fp.tell()
1816 fp = lr.fp
1816 fp = lr.fp
1817 except IOError:
1817 except IOError:
1818 fp = stringio(lr.fp.read())
1818 fp = stringio(lr.fp.read())
1819 gitlr = linereader(fp)
1819 gitlr = linereader(fp)
1820 gitlr.push(firstline)
1820 gitlr.push(firstline)
1821 gitpatches = readgitpatch(gitlr)
1821 gitpatches = readgitpatch(gitlr)
1822 fp.seek(pos)
1822 fp.seek(pos)
1823 return gitpatches
1823 return gitpatches
1824
1824
1825 def iterhunks(fp):
1825 def iterhunks(fp):
1826 """Read a patch and yield the following events:
1826 """Read a patch and yield the following events:
1827 - ("file", afile, bfile, firsthunk): select a new target file.
1827 - ("file", afile, bfile, firsthunk): select a new target file.
1828 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1828 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1829 "file" event.
1829 "file" event.
1830 - ("git", gitchanges): current diff is in git format, gitchanges
1830 - ("git", gitchanges): current diff is in git format, gitchanges
1831 maps filenames to gitpatch records. Unique event.
1831 maps filenames to gitpatch records. Unique event.
1832 """
1832 """
1833 afile = ""
1833 afile = ""
1834 bfile = ""
1834 bfile = ""
1835 state = None
1835 state = None
1836 hunknum = 0
1836 hunknum = 0
1837 emitfile = newfile = False
1837 emitfile = newfile = False
1838 gitpatches = None
1838 gitpatches = None
1839
1839
1840 # our states
1840 # our states
1841 BFILE = 1
1841 BFILE = 1
1842 context = None
1842 context = None
1843 lr = linereader(fp)
1843 lr = linereader(fp)
1844
1844
1845 for x in iter(lr.readline, ''):
1845 for x in iter(lr.readline, ''):
1846 if state == BFILE and (
1846 if state == BFILE and (
1847 (not context and x[0] == '@')
1847 (not context and x[0] == '@')
1848 or (context is not False and x.startswith('***************'))
1848 or (context is not False and x.startswith('***************'))
1849 or x.startswith('GIT binary patch')):
1849 or x.startswith('GIT binary patch')):
1850 gp = None
1850 gp = None
1851 if (gitpatches and
1851 if (gitpatches and
1852 gitpatches[-1].ispatching(afile, bfile)):
1852 gitpatches[-1].ispatching(afile, bfile)):
1853 gp = gitpatches.pop()
1853 gp = gitpatches.pop()
1854 if x.startswith('GIT binary patch'):
1854 if x.startswith('GIT binary patch'):
1855 h = binhunk(lr, gp.path)
1855 h = binhunk(lr, gp.path)
1856 else:
1856 else:
1857 if context is None and x.startswith('***************'):
1857 if context is None and x.startswith('***************'):
1858 context = True
1858 context = True
1859 h = hunk(x, hunknum + 1, lr, context)
1859 h = hunk(x, hunknum + 1, lr, context)
1860 hunknum += 1
1860 hunknum += 1
1861 if emitfile:
1861 if emitfile:
1862 emitfile = False
1862 emitfile = False
1863 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1863 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1864 yield 'hunk', h
1864 yield 'hunk', h
1865 elif x.startswith('diff --git a/'):
1865 elif x.startswith('diff --git a/'):
1866 m = gitre.match(x.rstrip(' \r\n'))
1866 m = gitre.match(x.rstrip(' \r\n'))
1867 if not m:
1867 if not m:
1868 continue
1868 continue
1869 if gitpatches is None:
1869 if gitpatches is None:
1870 # scan whole input for git metadata
1870 # scan whole input for git metadata
1871 gitpatches = scangitpatch(lr, x)
1871 gitpatches = scangitpatch(lr, x)
1872 yield 'git', [g.copy() for g in gitpatches
1872 yield 'git', [g.copy() for g in gitpatches
1873 if g.op in ('COPY', 'RENAME')]
1873 if g.op in ('COPY', 'RENAME')]
1874 gitpatches.reverse()
1874 gitpatches.reverse()
1875 afile = 'a/' + m.group(1)
1875 afile = 'a/' + m.group(1)
1876 bfile = 'b/' + m.group(2)
1876 bfile = 'b/' + m.group(2)
1877 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1877 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1878 gp = gitpatches.pop()
1878 gp = gitpatches.pop()
1879 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1879 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1880 if not gitpatches:
1880 if not gitpatches:
1881 raise PatchError(_('failed to synchronize metadata for "%s"')
1881 raise PatchError(_('failed to synchronize metadata for "%s"')
1882 % afile[2:])
1882 % afile[2:])
1883 gp = gitpatches[-1]
1883 gp = gitpatches[-1]
1884 newfile = True
1884 newfile = True
1885 elif x.startswith('---'):
1885 elif x.startswith('---'):
1886 # check for a unified diff
1886 # check for a unified diff
1887 l2 = lr.readline()
1887 l2 = lr.readline()
1888 if not l2.startswith('+++'):
1888 if not l2.startswith('+++'):
1889 lr.push(l2)
1889 lr.push(l2)
1890 continue
1890 continue
1891 newfile = True
1891 newfile = True
1892 context = False
1892 context = False
1893 afile = parsefilename(x)
1893 afile = parsefilename(x)
1894 bfile = parsefilename(l2)
1894 bfile = parsefilename(l2)
1895 elif x.startswith('***'):
1895 elif x.startswith('***'):
1896 # check for a context diff
1896 # check for a context diff
1897 l2 = lr.readline()
1897 l2 = lr.readline()
1898 if not l2.startswith('---'):
1898 if not l2.startswith('---'):
1899 lr.push(l2)
1899 lr.push(l2)
1900 continue
1900 continue
1901 l3 = lr.readline()
1901 l3 = lr.readline()
1902 lr.push(l3)
1902 lr.push(l3)
1903 if not l3.startswith("***************"):
1903 if not l3.startswith("***************"):
1904 lr.push(l2)
1904 lr.push(l2)
1905 continue
1905 continue
1906 newfile = True
1906 newfile = True
1907 context = True
1907 context = True
1908 afile = parsefilename(x)
1908 afile = parsefilename(x)
1909 bfile = parsefilename(l2)
1909 bfile = parsefilename(l2)
1910
1910
1911 if newfile:
1911 if newfile:
1912 newfile = False
1912 newfile = False
1913 emitfile = True
1913 emitfile = True
1914 state = BFILE
1914 state = BFILE
1915 hunknum = 0
1915 hunknum = 0
1916
1916
1917 while gitpatches:
1917 while gitpatches:
1918 gp = gitpatches.pop()
1918 gp = gitpatches.pop()
1919 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1919 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
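# Editorial sketch (illustrative only): for a plain unified diff touching one
# file, this generator yields a 'file' event followed by one 'hunk' event per
# hunk; git patches may additionally yield a single 'git' event up front
# carrying COPY/RENAME metadata, plus trailing 'file' events for any git
# patches without hunks of their own.
#   >>> kinds = ['file', 'hunk', 'hunk']    # hypothetical event stream
#   >>> kinds.count('hunk')
#   2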
1920
1920
1921 def applybindelta(binchunk, data):
1921 def applybindelta(binchunk, data):
1922 """Apply a binary delta hunk
1922 """Apply a binary delta hunk
1923 The algorithm used is the algorithm from git's patch-delta.c
1923 The algorithm used is the algorithm from git's patch-delta.c
1924 """
1924 """
1925 def deltahead(binchunk):
1925 def deltahead(binchunk):
1926 i = 0
1926 i = 0
1927 for c in binchunk:
1927 for c in binchunk:
1928 i += 1
1928 i += 1
1929 if not (ord(c) & 0x80):
1929 if not (ord(c) & 0x80):
1930 return i
1930 return i
1931 return i
1931 return i
1932 out = ""
1932 out = ""
1933 s = deltahead(binchunk)
1933 s = deltahead(binchunk)
1934 binchunk = binchunk[s:]
1934 binchunk = binchunk[s:]
1935 s = deltahead(binchunk)
1935 s = deltahead(binchunk)
1936 binchunk = binchunk[s:]
1936 binchunk = binchunk[s:]
1937 i = 0
1937 i = 0
1938 while i < len(binchunk):
1938 while i < len(binchunk):
1939 cmd = ord(binchunk[i])
1939 cmd = ord(binchunk[i])
1940 i += 1
1940 i += 1
1941 if (cmd & 0x80):
1941 if (cmd & 0x80):
1942 offset = 0
1942 offset = 0
1943 size = 0
1943 size = 0
1944 if (cmd & 0x01):
1944 if (cmd & 0x01):
1945 offset = ord(binchunk[i])
1945 offset = ord(binchunk[i])
1946 i += 1
1946 i += 1
1947 if (cmd & 0x02):
1947 if (cmd & 0x02):
1948 offset |= ord(binchunk[i]) << 8
1948 offset |= ord(binchunk[i]) << 8
1949 i += 1
1949 i += 1
1950 if (cmd & 0x04):
1950 if (cmd & 0x04):
1951 offset |= ord(binchunk[i]) << 16
1951 offset |= ord(binchunk[i]) << 16
1952 i += 1
1952 i += 1
1953 if (cmd & 0x08):
1953 if (cmd & 0x08):
1954 offset |= ord(binchunk[i]) << 24
1954 offset |= ord(binchunk[i]) << 24
1955 i += 1
1955 i += 1
1956 if (cmd & 0x10):
1956 if (cmd & 0x10):
1957 size = ord(binchunk[i])
1957 size = ord(binchunk[i])
1958 i += 1
1958 i += 1
1959 if (cmd & 0x20):
1959 if (cmd & 0x20):
1960 size |= ord(binchunk[i]) << 8
1960 size |= ord(binchunk[i]) << 8
1961 i += 1
1961 i += 1
1962 if (cmd & 0x40):
1962 if (cmd & 0x40):
1963 size |= ord(binchunk[i]) << 16
1963 size |= ord(binchunk[i]) << 16
1964 i += 1
1964 i += 1
1965 if size == 0:
1965 if size == 0:
1966 size = 0x10000
1966 size = 0x10000
1967 offset_end = offset + size
1967 offset_end = offset + size
1968 out += data[offset:offset_end]
1968 out += data[offset:offset_end]
1969 elif cmd != 0:
1969 elif cmd != 0:
1970 offset_end = i + cmd
1970 offset_end = i + cmd
1971 out += binchunk[i:offset_end]
1971 out += binchunk[i:offset_end]
1972 i += cmd
1972 i += cmd
1973 else:
1973 else:
1974 raise PatchError(_('unexpected delta opcode 0'))
1974 raise PatchError(_('unexpected delta opcode 0'))
1975 return out
1975 return out
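# Editorial sketch of the opcode handling above (illustrative only): a command
# byte with the high bit set copies from the source data, bits 0x01-0x08
# selecting offset bytes and 0x10-0x40 selecting size bytes; a command byte of
# 1-127 inserts that many literal bytes, and 0 is rejected. For example 0x91
# asks for a copy with a one-byte offset and a one-byte size:
#   >>> cmd = 0x91
#   >>> bool(cmd & 0x80), bool(cmd & 0x01), bool(cmd & 0x10)
#   (True, True, True)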
1976
1976
1977 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1977 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1978 """Reads a patch from fp and tries to apply it.
1978 """Reads a patch from fp and tries to apply it.
1979
1979
1980 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1980 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1981 there was any fuzz.
1981 there was any fuzz.
1982
1982
1983 If 'eolmode' is 'strict', the patch content and patched file are
1983 If 'eolmode' is 'strict', the patch content and patched file are
1984 read in binary mode. Otherwise, line endings are ignored when
1984 read in binary mode. Otherwise, line endings are ignored when
1985 patching then normalized according to 'eolmode'.
1985 patching then normalized according to 'eolmode'.
1986 """
1986 """
1987 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1987 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1988 prefix=prefix, eolmode=eolmode)
1988 prefix=prefix, eolmode=eolmode)
1989
1989
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

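For reference, the popen call above ends up running a single shell pipeline of the form `<patcher> -d <repo root> -p<strip> < <patchname>`. Below is a sketch of the same string building, using pipes.quote as a stand-in for util.shellquote and made-up values; it is not part of patch.py.

import pipes

patcher, strip, patchname, cwd = 'patch', 1, 'fix.diff', '/repo'
args = ['-d %s' % pipes.quote(cwd)]
cmd = '%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                           pipes.quote(patchname))
assert cmd == 'patch -d /repo -p1 < fix.diff'
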
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed

class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))

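The get() helper above implements a small precedence rule: an explicitly set command-line value (anything truthy, or an explicit boolean) wins over the config entry, while an unset/None flag falls through to hgrc. A standalone sketch of that rule, not part of patch.py:

def pick(cmdline_value, config_value):
    # mirrors get(): explicit command-line values win, None falls through
    if cmdline_value or isinstance(cmdline_value, bool):
        return cmdline_value
    return config_value

assert pick(None, True) is True     # flag not given: config wins
assert pick(False, True) is False   # an explicit False still overrides
assert pick('3', '8') == '3'        # e.g. --unified 3 beats diff.unified=8
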
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
                                   changes=changes, opts=opts,
                                   losedatafn=losedatafn, prefix=prefix,
                                   relroot=relroot, copy=copy):
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

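The join/sum expression in diff() flattens each file's (hunkrange, hunklines) pairs into one text block before yielding it. An isolated sketch of that step with hand-made hunk data, not part of patch.py:

hunks = [((1, 2, 1, 3), ['@@ -1,2 +1,3 @@\n', ' a\n', '+b\n', ' c\n'])]
text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
assert text == '@@ -1,2 +1,3 @@\n a\n+b\n c\n'
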
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

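The tail of diffhunks() buffers the plain rendering and falls back to git format the moment losedata() decides a change cannot be expressed. The same try/except shape in isolation, with a hypothetical renderer standing in for difffn(); this is a sketch, not part of patch.py:

def render(allow_git):
    if not allow_git:
        raise GitDiffRequired       # e.g. a rename or flag change was hit
    return ['diff --git a/x b/x']

try:
    out = list(render(allow_git=False))
except GitDiffRequired:
    out = render(allow_git=True)
assert out == ['diff --git a/x b/x']
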
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

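difflabel() pairs each piece of the diff with a colour label for ui.write(). A small sketch of what it yields for a one-hunk chunk, reusing the function above; illustrative only, not part of patch.py:

def fakediff():
    yield '@@ -1,1 +1,1 @@\n-old\n+new\n'

labeled = list(difflabel(fakediff))
assert ('@@ -1,1 +1,1 @@', 'diff.hunk') in labeled
assert ('-old', 'diff.deleted') in labeled
assert ('+new', 'diff.inserted') in labeled
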
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

def _filepairs(modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

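A sketch of the shapes _filepairs() produces, matching the docstring above: plain modifications come back as (f, f, None), and with opts.git a rename is reported once from the copy map while the removal of the old name is folded in. Illustrative only, using a minimal fake opts object:

class fakeopts(object):
    git = True

pairs = list(_filepairs(['m'], ['new'], ['old'], {'new': 'old'},
                        fakeopts()))
assert pairs == [('m', 'm', None), ('old', 'new', 'rename')]
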
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks

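The gitindex() helper defined at the top of trydiff() hashes file content the same way git hashes a blob object ('blob <len>\0' + data), which is why an empty file maps to git's well-known empty-blob id. A bytes-based sketch of the same computation, not part of patch.py:

import hashlib

def blobid(data):
    # same scheme as gitindex() above, written for bytes input
    s = hashlib.sha1(b'blob %d\0' % len(data))
    s.update(data)
    return s.hexdigest()

assert blobid(b'') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
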
def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

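diffstatdata() walks the diff text once and returns one (filename, adds, removes, isbinary) tuple per file, skipping '---'/'+++' header lines via the inheader flag. A small check against a hand-written git-style diff, feeding the two functions above; illustrative only, not part of patch.py:

sample = ['diff --git a/foo b/foo\n',
          '--- a/foo\n',
          '+++ b/foo\n',
          '@@ -1,2 +1,2 @@\n',
          '-old line\n',
          '+new line\n',
          ' unchanged\n']
assert diffstatdata(sample) == [('foo', 1, 1, False)]
assert diffstatsum(diffstatdata(sample)) == (3, 2, 1, 1, False)
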
def diffstat(lines, width=80):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

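The scale() helper above shrinks the +/- histogram proportionally to the available graph width, but guarantees at least one character for any nonzero count. The same rule in isolation, not part of patch.py:

def scale(i, maxtotal, graphwidth):
    if maxtotal <= graphwidth:
        return i
    return max(i * graphwidth // maxtotal, int(bool(i)))

assert scale(5, 8, 40) == 5        # small diffs are drawn 1:1
assert scale(1, 400, 40) == 1      # a single change still shows one '+'
assert scale(200, 400, 40) == 20   # otherwise scaled down proportionally
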
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(br'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(br'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
@@ -1,462 +1,499 @@
Revert interactive tests
1 add and commit file f
2 add and commit file folder1/g
3 add and commit file folder2/h
4 add and commit file folder1/i
5 commit change to file f
6 commit changes to files folder1/g folder2/h
7 commit changes to files folder1/g folder2/h
8 revert interactive to commit id 2 (line 3 above), check that folder1/i is removed and
9 make workdir match 7
10 run the same test as 8 from within folder1 and check same expectations

  $ cat <<EOF >> $HGRCPATH
  > [ui]
  > interactive = true
  > [extensions]
  > record =
  > purge =
  > EOF


  $ mkdir -p a/folder1 a/folder2
  $ cd a
  $ hg init
  >>> open('f', 'wb').write("1\n2\n3\n4\n5\n")
  $ hg add f ; hg commit -m "adding f"
  $ cat f > folder1/g ; hg add folder1/g ; hg commit -m "adding folder1/g"
  $ cat f > folder2/h ; hg add folder2/h ; hg commit -m "adding folder2/h"
  $ cat f > folder1/i ; hg add folder1/i ; hg commit -m "adding folder1/i"
  >>> open('f', 'wb').write("a\n1\n2\n3\n4\n5\nb\n")
  $ hg commit -m "modifying f"
  >>> open('folder1/g', 'wb').write("c\n1\n2\n3\n4\n5\nd\n")
  $ hg commit -m "modifying folder1/g"
  >>> open('folder2/h', 'wb').write("e\n1\n2\n3\n4\n5\nf\n")
  $ hg commit -m "modifying folder2/h"
  $ hg tip
  changeset:   6:59dd6e4ab63a
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     modifying folder2/h

  $ hg revert -i -r 2 --all -- << EOF
  > y
  > y
  > y
  > y
  > y
  > ?
  > y
  > n
  > n
  > EOF
  reverting f
  reverting folder1/g (glob)
  removing folder1/i (glob)
  reverting folder2/h (glob)
  remove added file folder1/i (Yn)? y
  diff --git a/f b/f
  2 hunks, 2 lines changed
  examine changes to 'f'? [Ynesfdaq?] y

  @@ -1,5 +1,6 @@
  +a
   1
   2
   3
   4
   5
  revert change 1/6 to 'f'? [Ynesfdaq?] y

  @@ -1,5 +2,6 @@
   1
   2
   3
   4
   5
  +b
  revert change 2/6 to 'f'? [Ynesfdaq?] y

  diff --git a/folder1/g b/folder1/g
  2 hunks, 2 lines changed
  examine changes to 'folder1/g'? [Ynesfdaq?] y

  @@ -1,5 +1,6 @@
  +c
   1
   2
   3
   4
   5
  revert change 3/6 to 'folder1/g'? [Ynesfdaq?] ?

  y - yes, revert this change
  n - no, skip this change
  e - edit this change manually
  s - skip remaining changes to this file
  f - revert remaining changes to this file
  d - done, skip remaining changes and files
  a - revert all changes to all remaining files
  q - quit, reverting no changes
  ? - ? (display help)
  revert change 3/6 to 'folder1/g'? [Ynesfdaq?] y

  @@ -1,5 +2,6 @@
   1
   2
   3
   4
   5
  +d
  revert change 4/6 to 'folder1/g'? [Ynesfdaq?] n

  diff --git a/folder2/h b/folder2/h
  2 hunks, 2 lines changed
  examine changes to 'folder2/h'? [Ynesfdaq?] n

118 $ cat f
118 $ cat f
119 1
119 1
120 2
120 2
121 3
121 3
122 4
122 4
123 5
123 5
124 $ cat folder1/g
124 $ cat folder1/g
125 1
125 1
126 2
126 2
127 3
127 3
128 4
128 4
129 5
129 5
130 d
130 d
131 $ cat folder2/h
131 $ cat folder2/h
132 e
132 e
133 1
133 1
134 2
134 2
135 3
135 3
136 4
136 4
137 5
137 5
138 f
138 f
139
139
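As an aside (not part of the original test): the heredoc-driven sessions above can also be scripted from Python; a minimal sketch, assuming hg is on PATH and the current directory is the repository created above, that feeds answers to the interactive prompts through stdin:

# Illustrative only -- pipes scripted replies to "hg revert -i", like the
# "<< EOF" blocks in this file. One reply per prompt, in order.
import subprocess

answers = b"y\ny\ny\ny\ny\ny\nn\nn\n"
subprocess.run(
    ["hg", "revert", "-i", "-r", "2", "--all",
     "--config", "ui.interactive=true"],  # prompt even though stdin is not a tty
    input=answers, check=True)
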
Test that --interactive lifts the need for --all

$ echo q | hg revert -i -r 2
reverting folder1/g (glob)
reverting folder2/h (glob)
diff --git a/folder1/g b/folder1/g
1 hunks, 1 lines changed
examine changes to 'folder1/g'? [Ynesfdaq?] q

abort: user quit
[255]
$ ls folder1/
g

Test that a noop revert doesn't do an unnecessary backup
$ (echo y; echo n) | hg revert -i -r 2 folder1/g
diff --git a/folder1/g b/folder1/g
1 hunks, 1 lines changed
examine changes to 'folder1/g'? [Ynesfdaq?] y

@@ -3,3 +3,4 @@
3
4
5
+d
revert this change to 'folder1/g'? [Ynesfdaq?] n

$ ls folder1/
g

Test --no-backup
$ (echo y; echo y) | hg revert -i -C -r 2 folder1/g
diff --git a/folder1/g b/folder1/g
1 hunks, 1 lines changed
examine changes to 'folder1/g'? [Ynesfdaq?] y

@@ -3,3 +3,4 @@
3
4
5
+d
revert this change to 'folder1/g'? [Ynesfdaq?] y

$ ls folder1/
g
>>> open('folder1/g', 'wb').write("1\n2\n3\n4\n5\nd\n")


$ hg update -C 6
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg revert -i -r 2 --all -- << EOF
> n
> y
> y
> y
> y
> y
> n
> n
> EOF
reverting f
reverting folder1/g (glob)
removing folder1/i (glob)
reverting folder2/h (glob)
remove added file folder1/i (Yn)? n
diff --git a/f b/f
2 hunks, 2 lines changed
examine changes to 'f'? [Ynesfdaq?] y

@@ -1,5 +1,6 @@
+a
1
2
3
4
5
revert change 1/6 to 'f'? [Ynesfdaq?] y

@@ -1,5 +2,6 @@
1
2
3
4
5
+b
revert change 2/6 to 'f'? [Ynesfdaq?] y

diff --git a/folder1/g b/folder1/g
2 hunks, 2 lines changed
examine changes to 'folder1/g'? [Ynesfdaq?] y

@@ -1,5 +1,6 @@
+c
1
2
3
4
5
revert change 3/6 to 'folder1/g'? [Ynesfdaq?] y

@@ -1,5 +2,6 @@
1
2
3
4
5
+d
revert change 4/6 to 'folder1/g'? [Ynesfdaq?] n

diff --git a/folder2/h b/folder2/h
2 hunks, 2 lines changed
examine changes to 'folder2/h'? [Ynesfdaq?] n

$ cat f
1
2
3
4
5
$ cat folder1/g
1
2
3
4
5
d
$ cat folder2/h
e
1
2
3
4
5
f
$ hg st
M f
M folder1/g
$ hg revert --interactive f << EOF
> y
> ?
> y
> n
> n
> EOF
diff --git a/f b/f
2 hunks, 2 lines changed
examine changes to 'f'? [Ynesfdaq?] y

@@ -1,6 +1,5 @@
-a
1
2
3
4
5
discard change 1/2 to 'f'? [Ynesfdaq?] ?

y - yes, discard this change
n - no, skip this change
e - edit this change manually
s - skip remaining changes to this file
f - discard remaining changes to this file
d - done, skip remaining changes and files
a - discard all changes to all remaining files
q - quit, discarding no changes
? - ? (display help)
discard change 1/2 to 'f'? [Ynesfdaq?] y

@@ -2,6 +1,5 @@
1
2
3
4
5
-b
discard change 2/2 to 'f'? [Ynesfdaq?] n

$ hg st
M f
M folder1/g
? f.orig
$ cat f
a
1
2
3
4
5
$ cat f.orig
1
2
3
4
5
$ rm f.orig
$ hg update -C .
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Check editing files newly added by a revert

1) Create a dummy editor changing 1 to 42
$ cat > $TESTTMP/editor.sh << '__EOF__'
> cat "$1" | sed "s/1/42/g" > tt
> mv tt "$1"
> __EOF__

2) Add k
$ printf "1\n" > k
$ hg add k
$ hg commit -m "add k"

3) Use interactive revert with editing (replacing +1 with +42):
$ printf "0\n2\n" > k
$ HGEDITOR="\"sh\" \"${TESTTMP}/editor.sh\"" hg revert -i <<EOF
> y
> e
> EOF
reverting k
diff --git a/k b/k
1 hunks, 2 lines changed
examine changes to 'k'? [Ynesfdaq?] y

@@ -1,1 +1,2 @@
-1
+0
+2
discard this change to 'k'? [Ynesfdaq?] e

$ cat k
42

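The dummy editor above is just sed run in place on whatever temporary hunk file Mercurial hands to $HGEDITOR; a rough Python equivalent (illustrative only, not used by the test) would be:

# editor.py -- same effect as editor.sh: replace every "1" with "42" in the
# file named by the first argument, writing the result back in place.
import sys

def edit(path):
    with open(path) as fp:
        text = fp.read()
    with open(path, "w") as fp:
        fp.write(text.replace("1", "42"))

if __name__ == "__main__":
    edit(sys.argv[1])
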
Check the experimental config to invert the selection:
$ cat <<EOF >> $HGRCPATH
> [experimental]
> revertalternateinteractivemode=False
> EOF


$ hg up -C .
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ printf 'firstline\nc\n1\n2\n3\n 3\n5\nd\nlastline\n' > folder1/g
$ hg diff --nodates
diff -r a3d963a027aa folder1/g
--- a/folder1/g
+++ b/folder1/g
@@ -1,7 +1,9 @@
+firstline
c
1
2
3
-4
+ 3
5
d
+lastline
$ hg revert -i <<EOF
> y
> y
> y
> n
> EOF
reverting folder1/g (glob)
diff --git a/folder1/g b/folder1/g
3 hunks, 3 lines changed
examine changes to 'folder1/g'? [Ynesfdaq?] y

@@ -1,4 +1,5 @@
+firstline
c
1
2
3
discard change 1/3 to 'folder1/g'? [Ynesfdaq?] y

@@ -1,7 +2,7 @@
c
1
2
3
-4
+ 3
5
d
discard change 2/3 to 'folder1/g'? [Ynesfdaq?] y

@@ -6,2 +7,3 @@
5
d
+lastline
discard change 3/3 to 'folder1/g'? [Ynesfdaq?] n

$ hg diff --nodates
diff -r a3d963a027aa folder1/g
--- a/folder1/g
+++ b/folder1/g
@@ -5,3 +5,4 @@
4
5
d
+lastline

$ hg update -C .
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg purge
$ touch newfile
$ hg add newfile
$ hg status
A newfile
$ hg revert -i <<EOF
> n
> EOF
forgetting newfile
forget added file newfile (Yn)? n
$ hg status
A newfile
$ hg revert -i <<EOF
> y
> EOF
forgetting newfile
forget added file newfile (Yn)? y
$ hg status
? newfile

When a line without EOL is selected during "revert -i" (issue5651)

$ cat <<EOF >> $HGRCPATH
> [experimental]
> %unset revertalternateinteractivemode
> EOF

$ hg init $TESTTMP/revert-i-eol
$ cd $TESTTMP/revert-i-eol
$ echo 0 > a
$ hg ci -qAm 0
$ printf 1 >> a
$ hg ci -qAm 1
$ cat a
0
1 (no-eol)

$ hg revert -ir'.^' <<EOF
> y
> y
> EOF
reverting a
diff --git a/a b/a
1 hunks, 1 lines changed
examine changes to 'a'? [Ynesfdaq?] y

@@ -1,1 +1,2 @@
0
+1
\ No newline at end of file
revert this change to 'a'? [Ynesfdaq?] y

$ cat a
0

$ cd ..
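
The point of the check above is that the added line has no trailing newline (hence the "\ No newline at end of file" marker in the hunk), and reverting it must restore the parent content byte for byte. A standalone sketch of that expectation, independent of Mercurial's actual implementation:

# The two revisions of "a" as byte strings; the second appends "1" without a
# newline (printf 1 >> a).
parent = b"0\n"
working = b"0\n1"

def drop_added_tail(content, tail):
    # A correct revert removes exactly the added bytes: it must not leave the
    # stray "1" behind, and it must not append a newline that was never there.
    assert content.endswith(tail)
    return content[:-len(tail)]

assert drop_added_tail(working, b"1") == parent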