patch: include flag-only file changes in "special" while filtering patch (issue5864)...
Sushil khanchi
r42148:6308aa82 default draft
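The one-line change below adds 'new mode' to header.special_re, so a git-style header that only changes a file's flags (an "old mode"/"new mode" pair with no hunks) is treated as "special" during interactive patch filtering, just as deletions, copies and renames already are. A minimal sketch of the effect (my own illustration, not part of the changeset; script.sh is an invented file name):

    import re

    # special_re as of this revision; 'new mode' is the addition
    special_re = re.compile(r'(?:index|deleted|copy|rename|new mode) ')

    # git extended header produced when only the exec bit of a file changes
    header_lines = [
        'diff --git a/script.sh b/script.sh',
        'old mode 100644',
        'new mode 100755',
    ]
    assert any(special_re.match(h) for h in header_lines)
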
@@ -1,2850 +1,2850 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import email
import errno
import hashlib
import os
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    diffhelper,
    diffutil,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError

# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

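# Illustrative usage sketch, not part of this changeset: callers such as
# 'hg import' roughly iterate over split() and hand each chunk to extract(),
# e.g.
#
#   for chunk in split(fileobj):
#       with extract(ui, chunk) as patchdata:
#           ...  # patchdata may carry 'filename', 'message', 'user', ...
#
# where 'ui' and 'fileobj' stand for whatever the caller already has.
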
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        tmpfp.close()
        os.unlink(tmpname)

def _extract(ui, fileobj, tmpname, tmpfp):

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        if subject.startswith('[PATCH'):
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data

class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return r"<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
454 """Write rejected lines for fname. total is the number of hunks
454 """Write rejected lines for fname. total is the number of hunks
455 which failed to apply and total the total number of hunks for this
455 which failed to apply and total the total number of hunks for this
456 files.
456 files.
457 """
457 """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
-    special_re = re.compile('(?:index|deleted|copy|rename) ')
+    special_re = re.compile('(?:index|deleted|copy|rename|new mode) ')
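    # 'new mode ' matches the git extended header line emitted when only a
    # file's flags change; per this changeset (issue5864) such flag-only
    # changes are treated as special, see special() below.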
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level; for example, a file that has been deleted is a special file.
        # The user cannot change the content of the operation: in the case of
        # a deleted file they have to take the deletion or not take it, they
        # cannot take only some of it.
        # Newly added files are special if they are empty; they are not special
        # if they have some content, as we want to be able to change it
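        # Per this changeset (issue5864), a flag-only change (a git header
        # carrying only 'old mode'/'new mode' lines and no hunks) is also
        # special: there is no hunk to pick, only the mode change to take or
        # leave, which is why 'new mode' was added to special_re above.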
926 nocontent = len(self.header) == 2
926 nocontent = len(self.header) == 2
927 emptynewfile = self.isnewfile() and nocontent
927 emptynewfile = self.isnewfile() and nocontent
928 return (emptynewfile
928 return (emptynewfile
929 or any(self.special_re.match(h) for h in self.header))
929 or any(self.special_re.match(h) for h in self.header))
930
930
931 class recordhunk(object):
931 class recordhunk(object):
932 """patch hunk
932 """patch hunk
933
933
934 XXX shouldn't we merge this with the other hunk class?
934 XXX shouldn't we merge this with the other hunk class?
935 """
935 """
936
936
937 def __init__(self, header, fromline, toline, proc, before, hunk, after,
937 def __init__(self, header, fromline, toline, proc, before, hunk, after,
938 maxcontext=None):
938 maxcontext=None):
939 def trimcontext(lines, reverse=False):
939 def trimcontext(lines, reverse=False):
940 if maxcontext is not None:
940 if maxcontext is not None:
941 delta = len(lines) - maxcontext
941 delta = len(lines) - maxcontext
942 if delta > 0:
942 if delta > 0:
943 if reverse:
943 if reverse:
944 return delta, lines[delta:]
944 return delta, lines[delta:]
945 else:
945 else:
946 return delta, lines[:maxcontext]
946 return delta, lines[:maxcontext]
947 return 0, lines
947 return 0, lines
948
948
949 self.header = header
949 self.header = header
950 trimedbefore, self.before = trimcontext(before, True)
950 trimedbefore, self.before = trimcontext(before, True)
951 self.fromline = fromline + trimedbefore
951 self.fromline = fromline + trimedbefore
952 self.toline = toline + trimedbefore
952 self.toline = toline + trimedbefore
953 _trimedafter, self.after = trimcontext(after, False)
953 _trimedafter, self.after = trimcontext(after, False)
954 self.proc = proc
954 self.proc = proc
955 self.hunk = hunk
955 self.hunk = hunk
956 self.added, self.removed = self.countchanges(self.hunk)
956 self.added, self.removed = self.countchanges(self.hunk)
957
957
958 def __eq__(self, v):
958 def __eq__(self, v):
959 if not isinstance(v, recordhunk):
959 if not isinstance(v, recordhunk):
960 return False
960 return False
961
961
962 return ((v.hunk == self.hunk) and
962 return ((v.hunk == self.hunk) and
963 (v.proc == self.proc) and
963 (v.proc == self.proc) and
964 (self.fromline == v.fromline) and
964 (self.fromline == v.fromline) and
965 (self.header.files() == v.header.files()))
965 (self.header.files() == v.header.files()))
966
966
967 def __hash__(self):
967 def __hash__(self):
968 return hash((tuple(self.hunk),
968 return hash((tuple(self.hunk),
969 tuple(self.header.files()),
969 tuple(self.header.files()),
970 self.fromline,
970 self.fromline,
971 self.proc))
971 self.proc))
972
972
973 def countchanges(self, hunk):
973 def countchanges(self, hunk):
974 """hunk -> (n+,n-)"""
974 """hunk -> (n+,n-)"""
975 add = len([h for h in hunk if h.startswith('+')])
975 add = len([h for h in hunk if h.startswith('+')])
976 rem = len([h for h in hunk if h.startswith('-')])
976 rem = len([h for h in hunk if h.startswith('-')])
977 return add, rem
977 return add, rem
978
978
979 def reversehunk(self):
979 def reversehunk(self):
980 """return another recordhunk which is the reverse of the hunk
980 """return another recordhunk which is the reverse of the hunk
981
981
982 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
982 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
983 that, swap fromline/toline and +/- signs while keep other things
983 that, swap fromline/toline and +/- signs while keep other things
984 unchanged.
984 unchanged.
985 """
985 """
986 m = {'+': '-', '-': '+', '\\': '\\'}
986 m = {'+': '-', '-': '+', '\\': '\\'}
987 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
987 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
988 return recordhunk(self.header, self.toline, self.fromline, self.proc,
988 return recordhunk(self.header, self.toline, self.fromline, self.proc,
989 self.before, hunk, self.after)
989 self.before, hunk, self.after)
990
990
991 def write(self, fp):
991 def write(self, fp):
992 delta = len(self.before) + len(self.after)
992 delta = len(self.before) + len(self.after)
993 if self.after and self.after[-1] == '\\ No newline at end of file\n':
993 if self.after and self.after[-1] == '\\ No newline at end of file\n':
994 delta -= 1
994 delta -= 1
995 fromlen = delta + self.removed
995 fromlen = delta + self.removed
996 tolen = delta + self.added
996 tolen = delta + self.added
997 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
997 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
998 (self.fromline, fromlen, self.toline, tolen,
998 (self.fromline, fromlen, self.toline, tolen,
999 self.proc and (' ' + self.proc)))
999 self.proc and (' ' + self.proc)))
1000 fp.write(''.join(self.before + self.hunk + self.after))
1000 fp.write(''.join(self.before + self.hunk + self.after))
1001
1001
1002 pretty = write
1002 pretty = write
1003
1003
1004 def filename(self):
1004 def filename(self):
1005 return self.header.filename()
1005 return self.header.filename()
1006
1006
1007 def __repr__(self):
1007 def __repr__(self):
1008 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1008 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1009
1009
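# Editor's sketch (not part of the changeset): recordhunk.write() derives the
# "@@ -a,b +c,d @@" header purely from bookkeeping the object already holds:
# both lengths share the surrounding context (before/after), then the old side
# adds the '-' count and the new side adds the '+' count. A minimal standalone
# illustration of that arithmetic, using made-up counts and leaving out the
# "\ No newline at end of file" special case:

def _demo_hunk_header(fromline, toline, before, after, added, removed):
    # mirrors the length computation in recordhunk.write()
    delta = before + after
    return '@@ -%d,%d +%d,%d @@' % (fromline, delta + removed,
                                    toline, delta + added)

# a hunk with 2 context lines before, 1 after, 1 added and 2 removed lines:
assert _demo_hunk_header(10, 10, 2, 1, 1, 2) == '@@ -10,5 +10,4 @@'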
1010 def getmessages():
1010 def getmessages():
1011 return {
1011 return {
1012 'multiple': {
1012 'multiple': {
1013 'apply': _("apply change %d/%d to '%s'?"),
1013 'apply': _("apply change %d/%d to '%s'?"),
1014 'discard': _("discard change %d/%d to '%s'?"),
1014 'discard': _("discard change %d/%d to '%s'?"),
1015 'record': _("record change %d/%d to '%s'?"),
1015 'record': _("record change %d/%d to '%s'?"),
1016 },
1016 },
1017 'single': {
1017 'single': {
1018 'apply': _("apply this change to '%s'?"),
1018 'apply': _("apply this change to '%s'?"),
1019 'discard': _("discard this change to '%s'?"),
1019 'discard': _("discard this change to '%s'?"),
1020 'record': _("record this change to '%s'?"),
1020 'record': _("record this change to '%s'?"),
1021 },
1021 },
1022 'help': {
1022 'help': {
1023 'apply': _('[Ynesfdaq?]'
1023 'apply': _('[Ynesfdaq?]'
1024 '$$ &Yes, apply this change'
1024 '$$ &Yes, apply this change'
1025 '$$ &No, skip this change'
1025 '$$ &No, skip this change'
1026 '$$ &Edit this change manually'
1026 '$$ &Edit this change manually'
1027 '$$ &Skip remaining changes to this file'
1027 '$$ &Skip remaining changes to this file'
1028 '$$ Apply remaining changes to this &file'
1028 '$$ Apply remaining changes to this &file'
1029 '$$ &Done, skip remaining changes and files'
1029 '$$ &Done, skip remaining changes and files'
1030 '$$ Apply &all changes to all remaining files'
1030 '$$ Apply &all changes to all remaining files'
1031 '$$ &Quit, applying no changes'
1031 '$$ &Quit, applying no changes'
1032 '$$ &? (display help)'),
1032 '$$ &? (display help)'),
1033 'discard': _('[Ynesfdaq?]'
1033 'discard': _('[Ynesfdaq?]'
1034 '$$ &Yes, discard this change'
1034 '$$ &Yes, discard this change'
1035 '$$ &No, skip this change'
1035 '$$ &No, skip this change'
1036 '$$ &Edit this change manually'
1036 '$$ &Edit this change manually'
1037 '$$ &Skip remaining changes to this file'
1037 '$$ &Skip remaining changes to this file'
1038 '$$ Discard remaining changes to this &file'
1038 '$$ Discard remaining changes to this &file'
1039 '$$ &Done, skip remaining changes and files'
1039 '$$ &Done, skip remaining changes and files'
1040 '$$ Discard &all changes to all remaining files'
1040 '$$ Discard &all changes to all remaining files'
1041 '$$ &Quit, discarding no changes'
1041 '$$ &Quit, discarding no changes'
1042 '$$ &? (display help)'),
1042 '$$ &? (display help)'),
1043 'record': _('[Ynesfdaq?]'
1043 'record': _('[Ynesfdaq?]'
1044 '$$ &Yes, record this change'
1044 '$$ &Yes, record this change'
1045 '$$ &No, skip this change'
1045 '$$ &No, skip this change'
1046 '$$ &Edit this change manually'
1046 '$$ &Edit this change manually'
1047 '$$ &Skip remaining changes to this file'
1047 '$$ &Skip remaining changes to this file'
1048 '$$ Record remaining changes to this &file'
1048 '$$ Record remaining changes to this &file'
1049 '$$ &Done, skip remaining changes and files'
1049 '$$ &Done, skip remaining changes and files'
1050 '$$ Record &all changes to all remaining files'
1050 '$$ Record &all changes to all remaining files'
1051 '$$ &Quit, recording no changes'
1051 '$$ &Quit, recording no changes'
1052 '$$ &? (display help)'),
1052 '$$ &? (display help)'),
1053 }
1053 }
1054 }
1054 }
1055
1055
1056 def filterpatch(ui, headers, operation=None):
1056 def filterpatch(ui, headers, operation=None):
1057 """Interactively filter patch chunks into applied-only chunks"""
1057 """Interactively filter patch chunks into applied-only chunks"""
1058 messages = getmessages()
1058 messages = getmessages()
1059
1059
1060 if operation is None:
1060 if operation is None:
1061 operation = 'record'
1061 operation = 'record'
1062
1062
1063 def prompt(skipfile, skipall, query, chunk):
1063 def prompt(skipfile, skipall, query, chunk):
1064 """prompt query, and process base inputs
1064 """prompt query, and process base inputs
1065
1065
1066 - y/n for the rest of the file
1066 - y/n for the rest of the file
1067 - y/n for all remaining changes
1067 - y/n for all remaining changes
1068 - ? (help)
1068 - ? (help)
1069 - q (quit)
1069 - q (quit)
1070
1070
1071 Return True/False and possibly updated skipfile and skipall.
1071 Return True/False and possibly updated skipfile and skipall.
1072 """
1072 """
1073 newpatches = None
1073 newpatches = None
1074 if skipall is not None:
1074 if skipall is not None:
1075 return skipall, skipfile, skipall, newpatches
1075 return skipall, skipfile, skipall, newpatches
1076 if skipfile is not None:
1076 if skipfile is not None:
1077 return skipfile, skipfile, skipall, newpatches
1077 return skipfile, skipfile, skipall, newpatches
1078 while True:
1078 while True:
1079 resps = messages['help'][operation]
1079 resps = messages['help'][operation]
1080 r = ui.promptchoice("%s %s" % (query, resps))
1080 r = ui.promptchoice("%s %s" % (query, resps))
1081 ui.write("\n")
1081 ui.write("\n")
1082 if r == 8: # ?
1082 if r == 8: # ?
1083 for c, t in ui.extractchoices(resps)[1]:
1083 for c, t in ui.extractchoices(resps)[1]:
1084 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1084 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1085 continue
1085 continue
1086 elif r == 0: # yes
1086 elif r == 0: # yes
1087 ret = True
1087 ret = True
1088 elif r == 1: # no
1088 elif r == 1: # no
1089 ret = False
1089 ret = False
1090 elif r == 2: # Edit patch
1090 elif r == 2: # Edit patch
1091 if chunk is None:
1091 if chunk is None:
1092 ui.write(_('cannot edit patch for whole file'))
1092 ui.write(_('cannot edit patch for whole file'))
1093 ui.write("\n")
1093 ui.write("\n")
1094 continue
1094 continue
1095 if chunk.header.binary():
1095 if chunk.header.binary():
1096 ui.write(_('cannot edit patch for binary file'))
1096 ui.write(_('cannot edit patch for binary file'))
1097 ui.write("\n")
1097 ui.write("\n")
1098 continue
1098 continue
1099 # Patch comment based on the Git one (see the comment at the end of
1099 # Patch comment based on the Git one (see the comment at the end of
1100 # https://mercurial-scm.org/wiki/RecordExtension)
1100 # https://mercurial-scm.org/wiki/RecordExtension)
1101 phelp = '---' + _("""
1101 phelp = '---' + _("""
1102 To remove '-' lines, make them ' ' lines (context).
1102 To remove '-' lines, make them ' ' lines (context).
1103 To remove '+' lines, delete them.
1103 To remove '+' lines, delete them.
1104 Lines starting with # will be removed from the patch.
1104 Lines starting with # will be removed from the patch.
1105
1105
1106 If the patch applies cleanly, the edited hunk will immediately be
1106 If the patch applies cleanly, the edited hunk will immediately be
1107 added to the record list. If it does not apply cleanly, a rejects
1107 added to the record list. If it does not apply cleanly, a rejects
1108 file will be generated: you can use that when you try again. If
1108 file will be generated: you can use that when you try again. If
1109 all lines of the hunk are removed, then the edit is aborted and
1109 all lines of the hunk are removed, then the edit is aborted and
1110 the hunk is left unchanged.
1110 the hunk is left unchanged.
1111 """)
1111 """)
1112 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1112 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1113 suffix=".diff")
1113 suffix=".diff")
1114 ncpatchfp = None
1114 ncpatchfp = None
1115 try:
1115 try:
1116 # Write the initial patch
1116 # Write the initial patch
1117 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1117 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1118 chunk.header.write(f)
1118 chunk.header.write(f)
1119 chunk.write(f)
1119 chunk.write(f)
1120 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1120 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1121 f.close()
1121 f.close()
1122 # Start the editor and wait for it to complete
1122 # Start the editor and wait for it to complete
1123 editor = ui.geteditor()
1123 editor = ui.geteditor()
1124 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1124 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1125 environ={'HGUSER': ui.username()},
1125 environ={'HGUSER': ui.username()},
1126 blockedtag='filterpatch')
1126 blockedtag='filterpatch')
1127 if ret != 0:
1127 if ret != 0:
1128 ui.warn(_("editor exited with exit code %d\n") % ret)
1128 ui.warn(_("editor exited with exit code %d\n") % ret)
1129 continue
1129 continue
1130 # Remove comment lines
1130 # Remove comment lines
1131 patchfp = open(patchfn, r'rb')
1131 patchfp = open(patchfn, r'rb')
1132 ncpatchfp = stringio()
1132 ncpatchfp = stringio()
1133 for line in util.iterfile(patchfp):
1133 for line in util.iterfile(patchfp):
1134 line = util.fromnativeeol(line)
1134 line = util.fromnativeeol(line)
1135 if not line.startswith('#'):
1135 if not line.startswith('#'):
1136 ncpatchfp.write(line)
1136 ncpatchfp.write(line)
1137 patchfp.close()
1137 patchfp.close()
1138 ncpatchfp.seek(0)
1138 ncpatchfp.seek(0)
1139 newpatches = parsepatch(ncpatchfp)
1139 newpatches = parsepatch(ncpatchfp)
1140 finally:
1140 finally:
1141 os.unlink(patchfn)
1141 os.unlink(patchfn)
1142 del ncpatchfp
1142 del ncpatchfp
1143 # Signal that the chunk shouldn't be applied as-is, but
1143 # Signal that the chunk shouldn't be applied as-is, but
1144 # provide the new patch to be used instead.
1144 # provide the new patch to be used instead.
1145 ret = False
1145 ret = False
1146 elif r == 3: # Skip
1146 elif r == 3: # Skip
1147 ret = skipfile = False
1147 ret = skipfile = False
1148 elif r == 4: # file (Record remaining)
1148 elif r == 4: # file (Record remaining)
1149 ret = skipfile = True
1149 ret = skipfile = True
1150 elif r == 5: # done, skip remaining
1150 elif r == 5: # done, skip remaining
1151 ret = skipall = False
1151 ret = skipall = False
1152 elif r == 6: # all
1152 elif r == 6: # all
1153 ret = skipall = True
1153 ret = skipall = True
1154 elif r == 7: # quit
1154 elif r == 7: # quit
1155 raise error.Abort(_('user quit'))
1155 raise error.Abort(_('user quit'))
1156 return ret, skipfile, skipall, newpatches
1156 return ret, skipfile, skipall, newpatches
1157
1157
1158 seen = set()
1158 seen = set()
1159 applied = {} # 'filename' -> [] of chunks
1159 applied = {} # 'filename' -> [] of chunks
1160 skipfile, skipall = None, None
1160 skipfile, skipall = None, None
1161 pos, total = 1, sum(len(h.hunks) for h in headers)
1161 pos, total = 1, sum(len(h.hunks) for h in headers)
1162 for h in headers:
1162 for h in headers:
1163 pos += len(h.hunks)
1163 pos += len(h.hunks)
1164 skipfile = None
1164 skipfile = None
1165 fixoffset = 0
1165 fixoffset = 0
1166 hdr = ''.join(h.header)
1166 hdr = ''.join(h.header)
1167 if hdr in seen:
1167 if hdr in seen:
1168 continue
1168 continue
1169 seen.add(hdr)
1169 seen.add(hdr)
1170 if skipall is None:
1170 if skipall is None:
1171 h.pretty(ui)
1171 h.pretty(ui)
1172 msg = (_('examine changes to %s?') %
1172 msg = (_('examine changes to %s?') %
1173 _(' and ').join("'%s'" % f for f in h.files()))
1173 _(' and ').join("'%s'" % f for f in h.files()))
1174 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1174 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1175 if not r:
1175 if not r:
1176 continue
1176 continue
1177 applied[h.filename()] = [h]
1177 applied[h.filename()] = [h]
1178 if h.allhunks():
1178 if h.allhunks():
1179 applied[h.filename()] += h.hunks
1179 applied[h.filename()] += h.hunks
1180 continue
1180 continue
1181 for i, chunk in enumerate(h.hunks):
1181 for i, chunk in enumerate(h.hunks):
1182 if skipfile is None and skipall is None:
1182 if skipfile is None and skipall is None:
1183 chunk.pretty(ui)
1183 chunk.pretty(ui)
1184 if total == 1:
1184 if total == 1:
1185 msg = messages['single'][operation] % chunk.filename()
1185 msg = messages['single'][operation] % chunk.filename()
1186 else:
1186 else:
1187 idx = pos - len(h.hunks) + i
1187 idx = pos - len(h.hunks) + i
1188 msg = messages['multiple'][operation] % (idx, total,
1188 msg = messages['multiple'][operation] % (idx, total,
1189 chunk.filename())
1189 chunk.filename())
1190 r, skipfile, skipall, newpatches = prompt(skipfile,
1190 r, skipfile, skipall, newpatches = prompt(skipfile,
1191 skipall, msg, chunk)
1191 skipall, msg, chunk)
1192 if r:
1192 if r:
1193 if fixoffset:
1193 if fixoffset:
1194 chunk = copy.copy(chunk)
1194 chunk = copy.copy(chunk)
1195 chunk.toline += fixoffset
1195 chunk.toline += fixoffset
1196 applied[chunk.filename()].append(chunk)
1196 applied[chunk.filename()].append(chunk)
1197 elif newpatches is not None:
1197 elif newpatches is not None:
1198 for newpatch in newpatches:
1198 for newpatch in newpatches:
1199 for newhunk in newpatch.hunks:
1199 for newhunk in newpatch.hunks:
1200 if fixoffset:
1200 if fixoffset:
1201 newhunk.toline += fixoffset
1201 newhunk.toline += fixoffset
1202 applied[newhunk.filename()].append(newhunk)
1202 applied[newhunk.filename()].append(newhunk)
1203 else:
1203 else:
1204 fixoffset += chunk.removed - chunk.added
1204 fixoffset += chunk.removed - chunk.added
1205 return (sum([h for h in applied.itervalues()
1205 return (sum([h for h in applied.itervalues()
1206 if h[0].special() or len(h) > 1], []), {})
1206 if h[0].special() or len(h) > 1], []), {})
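# Editor's sketch (illustrative, not part of the changeset): when a hunk is
# skipped interactively, every later hunk in the same file still carries line
# numbers that assumed the skipped hunk would be applied, so filterpatch
# accumulates 'removed - added' for each skipped hunk and adds that offset to
# the 'toline' of every later kept hunk. A toy recomputation of that
# bookkeeping with made-up hunk sizes:

def _demo_fixoffset(hunks):
    # hunks: list of (added, removed, keep) in file order
    fixoffset = 0
    shifts = []
    for added, removed, keep in hunks:
        if keep:
            shifts.append(fixoffset)        # shift applied to chunk.toline
        else:
            fixoffset += removed - added    # same update as in filterpatch
    return shifts

# skipping a hunk that would have added 3 lines and removed 1 shifts the
# following kept hunk back by 2 lines:
assert _demo_fixoffset([(3, 1, False), (0, 1, True)]) == [-2]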
1207 class hunk(object):
1207 class hunk(object):
1208 def __init__(self, desc, num, lr, context):
1208 def __init__(self, desc, num, lr, context):
1209 self.number = num
1209 self.number = num
1210 self.desc = desc
1210 self.desc = desc
1211 self.hunk = [desc]
1211 self.hunk = [desc]
1212 self.a = []
1212 self.a = []
1213 self.b = []
1213 self.b = []
1214 self.starta = self.lena = None
1214 self.starta = self.lena = None
1215 self.startb = self.lenb = None
1215 self.startb = self.lenb = None
1216 if lr is not None:
1216 if lr is not None:
1217 if context:
1217 if context:
1218 self.read_context_hunk(lr)
1218 self.read_context_hunk(lr)
1219 else:
1219 else:
1220 self.read_unified_hunk(lr)
1220 self.read_unified_hunk(lr)
1221
1221
1222 def getnormalized(self):
1222 def getnormalized(self):
1223 """Return a copy with line endings normalized to LF."""
1223 """Return a copy with line endings normalized to LF."""
1224
1224
1225 def normalize(lines):
1225 def normalize(lines):
1226 nlines = []
1226 nlines = []
1227 for line in lines:
1227 for line in lines:
1228 if line.endswith('\r\n'):
1228 if line.endswith('\r\n'):
1229 line = line[:-2] + '\n'
1229 line = line[:-2] + '\n'
1230 nlines.append(line)
1230 nlines.append(line)
1231 return nlines
1231 return nlines
1232
1232
1233 # Dummy object, it is rebuilt manually
1233 # Dummy object, it is rebuilt manually
1234 nh = hunk(self.desc, self.number, None, None)
1234 nh = hunk(self.desc, self.number, None, None)
1235 nh.number = self.number
1235 nh.number = self.number
1236 nh.desc = self.desc
1236 nh.desc = self.desc
1237 nh.hunk = self.hunk
1237 nh.hunk = self.hunk
1238 nh.a = normalize(self.a)
1238 nh.a = normalize(self.a)
1239 nh.b = normalize(self.b)
1239 nh.b = normalize(self.b)
1240 nh.starta = self.starta
1240 nh.starta = self.starta
1241 nh.startb = self.startb
1241 nh.startb = self.startb
1242 nh.lena = self.lena
1242 nh.lena = self.lena
1243 nh.lenb = self.lenb
1243 nh.lenb = self.lenb
1244 return nh
1244 return nh
1245
1245
1246 def read_unified_hunk(self, lr):
1246 def read_unified_hunk(self, lr):
1247 m = unidesc.match(self.desc)
1247 m = unidesc.match(self.desc)
1248 if not m:
1248 if not m:
1249 raise PatchError(_("bad hunk #%d") % self.number)
1249 raise PatchError(_("bad hunk #%d") % self.number)
1250 self.starta, self.lena, self.startb, self.lenb = m.groups()
1250 self.starta, self.lena, self.startb, self.lenb = m.groups()
1251 if self.lena is None:
1251 if self.lena is None:
1252 self.lena = 1
1252 self.lena = 1
1253 else:
1253 else:
1254 self.lena = int(self.lena)
1254 self.lena = int(self.lena)
1255 if self.lenb is None:
1255 if self.lenb is None:
1256 self.lenb = 1
1256 self.lenb = 1
1257 else:
1257 else:
1258 self.lenb = int(self.lenb)
1258 self.lenb = int(self.lenb)
1259 self.starta = int(self.starta)
1259 self.starta = int(self.starta)
1260 self.startb = int(self.startb)
1260 self.startb = int(self.startb)
1261 try:
1261 try:
1262 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1262 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1263 self.a, self.b)
1263 self.a, self.b)
1264 except error.ParseError as e:
1264 except error.ParseError as e:
1265 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1265 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1266 # if we hit EOF before finishing out the hunk, the last line will
1266 # if we hit EOF before finishing out the hunk, the last line will
1267 # be zero length. Let's try to fix it up.
1267 # be zero length. Let's try to fix it up.
1268 while len(self.hunk[-1]) == 0:
1268 while len(self.hunk[-1]) == 0:
1269 del self.hunk[-1]
1269 del self.hunk[-1]
1270 del self.a[-1]
1270 del self.a[-1]
1271 del self.b[-1]
1271 del self.b[-1]
1272 self.lena -= 1
1272 self.lena -= 1
1273 self.lenb -= 1
1273 self.lenb -= 1
1274 self._fixnewline(lr)
1274 self._fixnewline(lr)
1275
1275
1276 def read_context_hunk(self, lr):
1276 def read_context_hunk(self, lr):
1277 self.desc = lr.readline()
1277 self.desc = lr.readline()
1278 m = contextdesc.match(self.desc)
1278 m = contextdesc.match(self.desc)
1279 if not m:
1279 if not m:
1280 raise PatchError(_("bad hunk #%d") % self.number)
1280 raise PatchError(_("bad hunk #%d") % self.number)
1281 self.starta, aend = m.groups()
1281 self.starta, aend = m.groups()
1282 self.starta = int(self.starta)
1282 self.starta = int(self.starta)
1283 if aend is None:
1283 if aend is None:
1284 aend = self.starta
1284 aend = self.starta
1285 self.lena = int(aend) - self.starta
1285 self.lena = int(aend) - self.starta
1286 if self.starta:
1286 if self.starta:
1287 self.lena += 1
1287 self.lena += 1
1288 for x in pycompat.xrange(self.lena):
1288 for x in pycompat.xrange(self.lena):
1289 l = lr.readline()
1289 l = lr.readline()
1290 if l.startswith('---'):
1290 if l.startswith('---'):
1291 # lines addition, old block is empty
1291 # lines addition, old block is empty
1292 lr.push(l)
1292 lr.push(l)
1293 break
1293 break
1294 s = l[2:]
1294 s = l[2:]
1295 if l.startswith('- ') or l.startswith('! '):
1295 if l.startswith('- ') or l.startswith('! '):
1296 u = '-' + s
1296 u = '-' + s
1297 elif l.startswith(' '):
1297 elif l.startswith(' '):
1298 u = ' ' + s
1298 u = ' ' + s
1299 else:
1299 else:
1300 raise PatchError(_("bad hunk #%d old text line %d") %
1300 raise PatchError(_("bad hunk #%d old text line %d") %
1301 (self.number, x))
1301 (self.number, x))
1302 self.a.append(u)
1302 self.a.append(u)
1303 self.hunk.append(u)
1303 self.hunk.append(u)
1304
1304
1305 l = lr.readline()
1305 l = lr.readline()
1306 if l.startswith(br'\ '):
1306 if l.startswith(br'\ '):
1307 s = self.a[-1][:-1]
1307 s = self.a[-1][:-1]
1308 self.a[-1] = s
1308 self.a[-1] = s
1309 self.hunk[-1] = s
1309 self.hunk[-1] = s
1310 l = lr.readline()
1310 l = lr.readline()
1311 m = contextdesc.match(l)
1311 m = contextdesc.match(l)
1312 if not m:
1312 if not m:
1313 raise PatchError(_("bad hunk #%d") % self.number)
1313 raise PatchError(_("bad hunk #%d") % self.number)
1314 self.startb, bend = m.groups()
1314 self.startb, bend = m.groups()
1315 self.startb = int(self.startb)
1315 self.startb = int(self.startb)
1316 if bend is None:
1316 if bend is None:
1317 bend = self.startb
1317 bend = self.startb
1318 self.lenb = int(bend) - self.startb
1318 self.lenb = int(bend) - self.startb
1319 if self.startb:
1319 if self.startb:
1320 self.lenb += 1
1320 self.lenb += 1
1321 hunki = 1
1321 hunki = 1
1322 for x in pycompat.xrange(self.lenb):
1322 for x in pycompat.xrange(self.lenb):
1323 l = lr.readline()
1323 l = lr.readline()
1324 if l.startswith(br'\ '):
1324 if l.startswith(br'\ '):
1325 # XXX: the only way to hit this is with an invalid line range.
1325 # XXX: the only way to hit this is with an invalid line range.
1326 # The no-eol marker is not counted in the line range, but some
1326 # The no-eol marker is not counted in the line range, but some
1327 # diff(1) implementations out there may behave differently.
1327 # diff(1) implementations out there may behave differently.
1328 s = self.b[-1][:-1]
1328 s = self.b[-1][:-1]
1329 self.b[-1] = s
1329 self.b[-1] = s
1330 self.hunk[hunki - 1] = s
1330 self.hunk[hunki - 1] = s
1331 continue
1331 continue
1332 if not l:
1332 if not l:
1333 # line deletions, new block is empty and we hit EOF
1333 # line deletions, new block is empty and we hit EOF
1334 lr.push(l)
1334 lr.push(l)
1335 break
1335 break
1336 s = l[2:]
1336 s = l[2:]
1337 if l.startswith('+ ') or l.startswith('! '):
1337 if l.startswith('+ ') or l.startswith('! '):
1338 u = '+' + s
1338 u = '+' + s
1339 elif l.startswith(' '):
1339 elif l.startswith(' '):
1340 u = ' ' + s
1340 u = ' ' + s
1341 elif len(self.b) == 0:
1341 elif len(self.b) == 0:
1342 # line deletions, new block is empty
1342 # line deletions, new block is empty
1343 lr.push(l)
1343 lr.push(l)
1344 break
1344 break
1345 else:
1345 else:
1346 raise PatchError(_("bad hunk #%d old text line %d") %
1346 raise PatchError(_("bad hunk #%d old text line %d") %
1347 (self.number, x))
1347 (self.number, x))
1348 self.b.append(s)
1348 self.b.append(s)
1349 while True:
1349 while True:
1350 if hunki >= len(self.hunk):
1350 if hunki >= len(self.hunk):
1351 h = ""
1351 h = ""
1352 else:
1352 else:
1353 h = self.hunk[hunki]
1353 h = self.hunk[hunki]
1354 hunki += 1
1354 hunki += 1
1355 if h == u:
1355 if h == u:
1356 break
1356 break
1357 elif h.startswith('-'):
1357 elif h.startswith('-'):
1358 continue
1358 continue
1359 else:
1359 else:
1360 self.hunk.insert(hunki - 1, u)
1360 self.hunk.insert(hunki - 1, u)
1361 break
1361 break
1362
1362
1363 if not self.a:
1363 if not self.a:
1364 # this happens when lines were only added to the hunk
1364 # this happens when lines were only added to the hunk
1365 for x in self.hunk:
1365 for x in self.hunk:
1366 if x.startswith('-') or x.startswith(' '):
1366 if x.startswith('-') or x.startswith(' '):
1367 self.a.append(x)
1367 self.a.append(x)
1368 if not self.b:
1368 if not self.b:
1369 # this happens when lines were only deleted from the hunk
1369 # this happens when lines were only deleted from the hunk
1370 for x in self.hunk:
1370 for x in self.hunk:
1371 if x.startswith('+') or x.startswith(' '):
1371 if x.startswith('+') or x.startswith(' '):
1372 self.b.append(x[1:])
1372 self.b.append(x[1:])
1373 # @@ -start,len +start,len @@
1373 # @@ -start,len +start,len @@
1374 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1374 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1375 self.startb, self.lenb)
1375 self.startb, self.lenb)
1376 self.hunk[0] = self.desc
1376 self.hunk[0] = self.desc
1377 self._fixnewline(lr)
1377 self._fixnewline(lr)
1378
1378
1379 def _fixnewline(self, lr):
1379 def _fixnewline(self, lr):
1380 l = lr.readline()
1380 l = lr.readline()
1381 if l.startswith(br'\ '):
1381 if l.startswith(br'\ '):
1382 diffhelper.fixnewline(self.hunk, self.a, self.b)
1382 diffhelper.fixnewline(self.hunk, self.a, self.b)
1383 else:
1383 else:
1384 lr.push(l)
1384 lr.push(l)
1385
1385
1386 def complete(self):
1386 def complete(self):
1387 return len(self.a) == self.lena and len(self.b) == self.lenb
1387 return len(self.a) == self.lena and len(self.b) == self.lenb
1388
1388
1389 def _fuzzit(self, old, new, fuzz, toponly):
1389 def _fuzzit(self, old, new, fuzz, toponly):
1390 # this removes up to 'fuzz' context lines from the top and bottom of the
1390 # this removes up to 'fuzz' context lines from the top and bottom of the
1391 # 'old' and 'new' lists. It checks the hunk to make sure only context
1391 # 'old' and 'new' lists. It checks the hunk to make sure only context
1392 # lines are removed, then returns the shortened lists and the top count.
1392 # lines are removed, then returns the shortened lists and the top count.
1393 fuzz = min(fuzz, len(old))
1393 fuzz = min(fuzz, len(old))
1394 if fuzz:
1394 if fuzz:
1395 top = 0
1395 top = 0
1396 bot = 0
1396 bot = 0
1397 hlen = len(self.hunk)
1397 hlen = len(self.hunk)
1398 for x in pycompat.xrange(hlen - 1):
1398 for x in pycompat.xrange(hlen - 1):
1399 # the hunk starts with the @@ line, so use x+1
1399 # the hunk starts with the @@ line, so use x+1
1400 if self.hunk[x + 1].startswith(' '):
1400 if self.hunk[x + 1].startswith(' '):
1401 top += 1
1401 top += 1
1402 else:
1402 else:
1403 break
1403 break
1404 if not toponly:
1404 if not toponly:
1405 for x in pycompat.xrange(hlen - 1):
1405 for x in pycompat.xrange(hlen - 1):
1406 if self.hunk[hlen - bot - 1].startswith(' '):
1406 if self.hunk[hlen - bot - 1].startswith(' '):
1407 bot += 1
1407 bot += 1
1408 else:
1408 else:
1409 break
1409 break
1410
1410
1411 bot = min(fuzz, bot)
1411 bot = min(fuzz, bot)
1412 top = min(fuzz, top)
1412 top = min(fuzz, top)
1413 return old[top:len(old) - bot], new[top:len(new) - bot], top
1413 return old[top:len(old) - bot], new[top:len(new) - bot], top
1414 return old, new, 0
1414 return old, new, 0
1415
1415
1416 def fuzzit(self, fuzz, toponly):
1416 def fuzzit(self, fuzz, toponly):
1417 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1417 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1418 oldstart = self.starta + top
1418 oldstart = self.starta + top
1419 newstart = self.startb + top
1419 newstart = self.startb + top
1420 # zero length hunk ranges already have their start decremented
1420 # zero length hunk ranges already have their start decremented
1421 if self.lena and oldstart > 0:
1421 if self.lena and oldstart > 0:
1422 oldstart -= 1
1422 oldstart -= 1
1423 if self.lenb and newstart > 0:
1423 if self.lenb and newstart > 0:
1424 newstart -= 1
1424 newstart -= 1
1425 return old, oldstart, new, newstart
1425 return old, oldstart, new, newstart
1426
1426
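# Editor's sketch (not part of the changeset): _fuzzit only ever drops leading
# and trailing context lines (never '+'/'-' lines), capped at 'fuzz', and
# fuzzit() then shifts the hunk start by the number trimmed from the top. A
# minimal standalone model of the trimming step, with hypothetical lines:

def _demo_trim_context(old, fuzz):
    # count contiguous context (' ') lines at each end, capped at fuzz
    top = 0
    while top < len(old) and old[top].startswith(' ') and top < fuzz:
        top += 1
    bot = 0
    while bot < len(old) - top and old[-1 - bot].startswith(' ') and bot < fuzz:
        bot += 1
    return old[top:len(old) - bot], top

_demo_old = [' ctx1', ' ctx2', '-gone', ' ctx3']
assert _demo_trim_context(_demo_old, 1) == ([' ctx2', '-gone'], 1)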
1427 class binhunk(object):
1427 class binhunk(object):
1428 'A binary patch file.'
1428 'A binary patch file.'
1429 def __init__(self, lr, fname):
1429 def __init__(self, lr, fname):
1430 self.text = None
1430 self.text = None
1431 self.delta = False
1431 self.delta = False
1432 self.hunk = ['GIT binary patch\n']
1432 self.hunk = ['GIT binary patch\n']
1433 self._fname = fname
1433 self._fname = fname
1434 self._read(lr)
1434 self._read(lr)
1435
1435
1436 def complete(self):
1436 def complete(self):
1437 return self.text is not None
1437 return self.text is not None
1438
1438
1439 def new(self, lines):
1439 def new(self, lines):
1440 if self.delta:
1440 if self.delta:
1441 return [applybindelta(self.text, ''.join(lines))]
1441 return [applybindelta(self.text, ''.join(lines))]
1442 return [self.text]
1442 return [self.text]
1443
1443
1444 def _read(self, lr):
1444 def _read(self, lr):
1445 def getline(lr, hunk):
1445 def getline(lr, hunk):
1446 l = lr.readline()
1446 l = lr.readline()
1447 hunk.append(l)
1447 hunk.append(l)
1448 return l.rstrip('\r\n')
1448 return l.rstrip('\r\n')
1449
1449
1450 while True:
1450 while True:
1451 line = getline(lr, self.hunk)
1451 line = getline(lr, self.hunk)
1452 if not line:
1452 if not line:
1453 raise PatchError(_('could not extract "%s" binary data')
1453 raise PatchError(_('could not extract "%s" binary data')
1454 % self._fname)
1454 % self._fname)
1455 if line.startswith('literal '):
1455 if line.startswith('literal '):
1456 size = int(line[8:].rstrip())
1456 size = int(line[8:].rstrip())
1457 break
1457 break
1458 if line.startswith('delta '):
1458 if line.startswith('delta '):
1459 size = int(line[6:].rstrip())
1459 size = int(line[6:].rstrip())
1460 self.delta = True
1460 self.delta = True
1461 break
1461 break
1462 dec = []
1462 dec = []
1463 line = getline(lr, self.hunk)
1463 line = getline(lr, self.hunk)
1464 while len(line) > 1:
1464 while len(line) > 1:
1465 l = line[0:1]
1465 l = line[0:1]
1466 if l <= 'Z' and l >= 'A':
1466 if l <= 'Z' and l >= 'A':
1467 l = ord(l) - ord('A') + 1
1467 l = ord(l) - ord('A') + 1
1468 else:
1468 else:
1469 l = ord(l) - ord('a') + 27
1469 l = ord(l) - ord('a') + 27
1470 try:
1470 try:
1471 dec.append(util.b85decode(line[1:])[:l])
1471 dec.append(util.b85decode(line[1:])[:l])
1472 except ValueError as e:
1472 except ValueError as e:
1473 raise PatchError(_('could not decode "%s" binary patch: %s')
1473 raise PatchError(_('could not decode "%s" binary patch: %s')
1474 % (self._fname, stringutil.forcebytestr(e)))
1474 % (self._fname, stringutil.forcebytestr(e)))
1475 line = getline(lr, self.hunk)
1475 line = getline(lr, self.hunk)
1476 text = zlib.decompress(''.join(dec))
1476 text = zlib.decompress(''.join(dec))
1477 if len(text) != size:
1477 if len(text) != size:
1478 raise PatchError(_('"%s" length is %d bytes, should be %d')
1478 raise PatchError(_('"%s" length is %d bytes, should be %d')
1479 % (self._fname, len(text), size))
1479 % (self._fname, len(text), size))
1480 self.text = text
1480 self.text = text
1481
1481
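# Editor's sketch (not part of the changeset): in a "GIT binary patch" body
# each data line starts with one character giving the decoded byte count of
# that line, 'A'..'Z' meaning 1..26 and 'a'..'z' meaning 27..52; the rest of
# the line is base85 data, and the concatenated payload is zlib-compressed.
# A small illustration of the length-character mapping used in _read():

def _demo_linelen(ch):
    if 'A' <= ch <= 'Z':
        return ord(ch) - ord('A') + 1
    return ord(ch) - ord('a') + 27

assert _demo_linelen('A') == 1
assert _demo_linelen('Z') == 26
assert _demo_linelen('a') == 27
assert _demo_linelen('z') == 52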
1482 def parsefilename(str):
1482 def parsefilename(str):
1483 # --- filename, followed by a tab or a space and trailing metadata
1483 # --- filename, followed by a tab or a space and trailing metadata
1484 s = str[4:].rstrip('\r\n')
1484 s = str[4:].rstrip('\r\n')
1485 i = s.find('\t')
1485 i = s.find('\t')
1486 if i < 0:
1486 if i < 0:
1487 i = s.find(' ')
1487 i = s.find(' ')
1488 if i < 0:
1488 if i < 0:
1489 return s
1489 return s
1490 return s[:i]
1490 return s[:i]
1491
1491
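# Editor's note (illustrative sketch): parsefilename() strips the '--- '/'+++ '
# marker and keeps only the path up to the first tab or space, which is where
# diff(1) typically appends a timestamp:

assert parsefilename('--- a/foo.c\t2019-01-01 00:00:00') == 'a/foo.c'
assert parsefilename('+++ b/foo.c') == 'b/foo.c'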
1492 def reversehunks(hunks):
1492 def reversehunks(hunks):
1493 '''reverse the signs in the hunks given as argument
1493 '''reverse the signs in the hunks given as argument
1494
1494
1495 This function operates on hunks coming out of patch.filterpatch, that is
1495 This function operates on hunks coming out of patch.filterpatch, that is
1496 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1496 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1497
1497
1498 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1498 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1499 ... --- a/folder1/g
1499 ... --- a/folder1/g
1500 ... +++ b/folder1/g
1500 ... +++ b/folder1/g
1501 ... @@ -1,7 +1,7 @@
1501 ... @@ -1,7 +1,7 @@
1502 ... +firstline
1502 ... +firstline
1503 ... c
1503 ... c
1504 ... 1
1504 ... 1
1505 ... 2
1505 ... 2
1506 ... + 3
1506 ... + 3
1507 ... -4
1507 ... -4
1508 ... 5
1508 ... 5
1509 ... d
1509 ... d
1510 ... +lastline"""
1510 ... +lastline"""
1511 >>> hunks = parsepatch([rawpatch])
1511 >>> hunks = parsepatch([rawpatch])
1512 >>> hunkscomingfromfilterpatch = []
1512 >>> hunkscomingfromfilterpatch = []
1513 >>> for h in hunks:
1513 >>> for h in hunks:
1514 ... hunkscomingfromfilterpatch.append(h)
1514 ... hunkscomingfromfilterpatch.append(h)
1515 ... hunkscomingfromfilterpatch.extend(h.hunks)
1515 ... hunkscomingfromfilterpatch.extend(h.hunks)
1516
1516
1517 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1517 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1518 >>> from . import util
1518 >>> from . import util
1519 >>> fp = util.stringio()
1519 >>> fp = util.stringio()
1520 >>> for c in reversedhunks:
1520 >>> for c in reversedhunks:
1521 ... c.write(fp)
1521 ... c.write(fp)
1522 >>> fp.seek(0) or None
1522 >>> fp.seek(0) or None
1523 >>> reversedpatch = fp.read()
1523 >>> reversedpatch = fp.read()
1524 >>> print(pycompat.sysstr(reversedpatch))
1524 >>> print(pycompat.sysstr(reversedpatch))
1525 diff --git a/folder1/g b/folder1/g
1525 diff --git a/folder1/g b/folder1/g
1526 --- a/folder1/g
1526 --- a/folder1/g
1527 +++ b/folder1/g
1527 +++ b/folder1/g
1528 @@ -1,4 +1,3 @@
1528 @@ -1,4 +1,3 @@
1529 -firstline
1529 -firstline
1530 c
1530 c
1531 1
1531 1
1532 2
1532 2
1533 @@ -2,6 +1,6 @@
1533 @@ -2,6 +1,6 @@
1534 c
1534 c
1535 1
1535 1
1536 2
1536 2
1537 - 3
1537 - 3
1538 +4
1538 +4
1539 5
1539 5
1540 d
1540 d
1541 @@ -6,3 +5,2 @@
1541 @@ -6,3 +5,2 @@
1542 5
1542 5
1543 d
1543 d
1544 -lastline
1544 -lastline
1545
1545
1546 '''
1546 '''
1547
1547
1548 newhunks = []
1548 newhunks = []
1549 for c in hunks:
1549 for c in hunks:
1550 if util.safehasattr(c, 'reversehunk'):
1550 if util.safehasattr(c, 'reversehunk'):
1551 c = c.reversehunk()
1551 c = c.reversehunk()
1552 newhunks.append(c)
1552 newhunks.append(c)
1553 return newhunks
1553 return newhunks
1554
1554
1555 def parsepatch(originalchunks, maxcontext=None):
1555 def parsepatch(originalchunks, maxcontext=None):
1556 """patch -> [] of headers -> [] of hunks
1556 """patch -> [] of headers -> [] of hunks
1557
1557
1558 If maxcontext is not None, trim context lines if necessary.
1558 If maxcontext is not None, trim context lines if necessary.
1559
1559
1560 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1560 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1561 ... --- a/folder1/g
1561 ... --- a/folder1/g
1562 ... +++ b/folder1/g
1562 ... +++ b/folder1/g
1563 ... @@ -1,8 +1,10 @@
1563 ... @@ -1,8 +1,10 @@
1564 ... 1
1564 ... 1
1565 ... 2
1565 ... 2
1566 ... -3
1566 ... -3
1567 ... 4
1567 ... 4
1568 ... 5
1568 ... 5
1569 ... 6
1569 ... 6
1570 ... +6.1
1570 ... +6.1
1571 ... +6.2
1571 ... +6.2
1572 ... 7
1572 ... 7
1573 ... 8
1573 ... 8
1574 ... +9'''
1574 ... +9'''
1575 >>> out = util.stringio()
1575 >>> out = util.stringio()
1576 >>> headers = parsepatch([rawpatch], maxcontext=1)
1576 >>> headers = parsepatch([rawpatch], maxcontext=1)
1577 >>> for header in headers:
1577 >>> for header in headers:
1578 ... header.write(out)
1578 ... header.write(out)
1579 ... for hunk in header.hunks:
1579 ... for hunk in header.hunks:
1580 ... hunk.write(out)
1580 ... hunk.write(out)
1581 >>> print(pycompat.sysstr(out.getvalue()))
1581 >>> print(pycompat.sysstr(out.getvalue()))
1582 diff --git a/folder1/g b/folder1/g
1582 diff --git a/folder1/g b/folder1/g
1583 --- a/folder1/g
1583 --- a/folder1/g
1584 +++ b/folder1/g
1584 +++ b/folder1/g
1585 @@ -2,3 +2,2 @@
1585 @@ -2,3 +2,2 @@
1586 2
1586 2
1587 -3
1587 -3
1588 4
1588 4
1589 @@ -6,2 +5,4 @@
1589 @@ -6,2 +5,4 @@
1590 6
1590 6
1591 +6.1
1591 +6.1
1592 +6.2
1592 +6.2
1593 7
1593 7
1594 @@ -8,1 +9,2 @@
1594 @@ -8,1 +9,2 @@
1595 8
1595 8
1596 +9
1596 +9
1597 """
1597 """
1598 class parser(object):
1598 class parser(object):
1599 """patch parsing state machine"""
1599 """patch parsing state machine"""
1600 def __init__(self):
1600 def __init__(self):
1601 self.fromline = 0
1601 self.fromline = 0
1602 self.toline = 0
1602 self.toline = 0
1603 self.proc = ''
1603 self.proc = ''
1604 self.header = None
1604 self.header = None
1605 self.context = []
1605 self.context = []
1606 self.before = []
1606 self.before = []
1607 self.hunk = []
1607 self.hunk = []
1608 self.headers = []
1608 self.headers = []
1609
1609
1610 def addrange(self, limits):
1610 def addrange(self, limits):
1611 self.addcontext([])
1611 self.addcontext([])
1612 fromstart, fromend, tostart, toend, proc = limits
1612 fromstart, fromend, tostart, toend, proc = limits
1613 self.fromline = int(fromstart)
1613 self.fromline = int(fromstart)
1614 self.toline = int(tostart)
1614 self.toline = int(tostart)
1615 self.proc = proc
1615 self.proc = proc
1616
1616
1617 def addcontext(self, context):
1617 def addcontext(self, context):
1618 if self.hunk:
1618 if self.hunk:
1619 h = recordhunk(self.header, self.fromline, self.toline,
1619 h = recordhunk(self.header, self.fromline, self.toline,
1620 self.proc, self.before, self.hunk, context, maxcontext)
1620 self.proc, self.before, self.hunk, context, maxcontext)
1621 self.header.hunks.append(h)
1621 self.header.hunks.append(h)
1622 self.fromline += len(self.before) + h.removed
1622 self.fromline += len(self.before) + h.removed
1623 self.toline += len(self.before) + h.added
1623 self.toline += len(self.before) + h.added
1624 self.before = []
1624 self.before = []
1625 self.hunk = []
1625 self.hunk = []
1626 self.context = context
1626 self.context = context
1627
1627
1628 def addhunk(self, hunk):
1628 def addhunk(self, hunk):
1629 if self.context:
1629 if self.context:
1630 self.before = self.context
1630 self.before = self.context
1631 self.context = []
1631 self.context = []
1632 if self.hunk:
1632 if self.hunk:
1633 self.addcontext([])
1633 self.addcontext([])
1634 self.hunk = hunk
1634 self.hunk = hunk
1635
1635
1636 def newfile(self, hdr):
1636 def newfile(self, hdr):
1637 self.addcontext([])
1637 self.addcontext([])
1638 h = header(hdr)
1638 h = header(hdr)
1639 self.headers.append(h)
1639 self.headers.append(h)
1640 self.header = h
1640 self.header = h
1641
1641
1642 def addother(self, line):
1642 def addother(self, line):
1643 pass # 'other' lines are ignored
1643 pass # 'other' lines are ignored
1644
1644
1645 def finished(self):
1645 def finished(self):
1646 self.addcontext([])
1646 self.addcontext([])
1647 return self.headers
1647 return self.headers
1648
1648
1649 transitions = {
1649 transitions = {
1650 'file': {'context': addcontext,
1650 'file': {'context': addcontext,
1651 'file': newfile,
1651 'file': newfile,
1652 'hunk': addhunk,
1652 'hunk': addhunk,
1653 'range': addrange},
1653 'range': addrange},
1654 'context': {'file': newfile,
1654 'context': {'file': newfile,
1655 'hunk': addhunk,
1655 'hunk': addhunk,
1656 'range': addrange,
1656 'range': addrange,
1657 'other': addother},
1657 'other': addother},
1658 'hunk': {'context': addcontext,
1658 'hunk': {'context': addcontext,
1659 'file': newfile,
1659 'file': newfile,
1660 'range': addrange},
1660 'range': addrange},
1661 'range': {'context': addcontext,
1661 'range': {'context': addcontext,
1662 'hunk': addhunk},
1662 'hunk': addhunk},
1663 'other': {'other': addother},
1663 'other': {'other': addother},
1664 }
1664 }
1665
1665
1666 p = parser()
1666 p = parser()
1667 fp = stringio()
1667 fp = stringio()
1668 fp.write(''.join(originalchunks))
1668 fp.write(''.join(originalchunks))
1669 fp.seek(0)
1669 fp.seek(0)
1670
1670
1671 state = 'context'
1671 state = 'context'
1672 for newstate, data in scanpatch(fp):
1672 for newstate, data in scanpatch(fp):
1673 try:
1673 try:
1674 p.transitions[state][newstate](p, data)
1674 p.transitions[state][newstate](p, data)
1675 except KeyError:
1675 except KeyError:
1676 raise PatchError('unhandled transition: %s -> %s' %
1676 raise PatchError('unhandled transition: %s -> %s' %
1677 (state, newstate))
1677 (state, newstate))
1678 state = newstate
1678 state = newstate
1679 del fp
1679 del fp
1680 return p.finished()
1680 return p.finished()
1681
1681
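# Editor's sketch (not part of the changeset): parsepatch() drives a small
# table-based state machine: scanpatch() labels each region of the input
# ('file', 'range', 'hunk', 'context', 'other') and the 'transitions' table
# maps (current state, next label) to the parser method that absorbs the
# data, raising PatchError for impossible sequences. The same dispatch
# pattern in isolation, with a hypothetical two-state table:

def _demo_dispatch(events):
    table = {
        'start': {'file': 'in-file'},
        'in-file': {'hunk': 'in-file', 'file': 'in-file'},
    }
    state = 'start'
    for label in events:
        if label not in table[state]:
            raise ValueError('unhandled transition: %s -> %s' % (state, label))
        state = table[state][label]
    return state

assert _demo_dispatch(['file', 'hunk', 'hunk', 'file']) == 'in-file'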
1682 def pathtransform(path, strip, prefix):
1682 def pathtransform(path, strip, prefix):
1683 '''turn a path from a patch into a path suitable for the repository
1683 '''turn a path from a patch into a path suitable for the repository
1684
1684
1685 prefix, if not empty, is expected to be normalized with a / at the end.
1685 prefix, if not empty, is expected to be normalized with a / at the end.
1686
1686
1687 Returns (stripped components, path in repository).
1687 Returns (stripped components, path in repository).
1688
1688
1689 >>> pathtransform(b'a/b/c', 0, b'')
1689 >>> pathtransform(b'a/b/c', 0, b'')
1690 ('', 'a/b/c')
1690 ('', 'a/b/c')
1691 >>> pathtransform(b' a/b/c ', 0, b'')
1691 >>> pathtransform(b' a/b/c ', 0, b'')
1692 ('', ' a/b/c')
1692 ('', ' a/b/c')
1693 >>> pathtransform(b' a/b/c ', 2, b'')
1693 >>> pathtransform(b' a/b/c ', 2, b'')
1694 ('a/b/', 'c')
1694 ('a/b/', 'c')
1695 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1695 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1696 ('', 'd/e/a/b/c')
1696 ('', 'd/e/a/b/c')
1697 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1697 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1698 ('a//b/', 'd/e/c')
1698 ('a//b/', 'd/e/c')
1699 >>> pathtransform(b'a/b/c', 3, b'')
1699 >>> pathtransform(b'a/b/c', 3, b'')
1700 Traceback (most recent call last):
1700 Traceback (most recent call last):
1701 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1701 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1702 '''
1702 '''
1703 pathlen = len(path)
1703 pathlen = len(path)
1704 i = 0
1704 i = 0
1705 if strip == 0:
1705 if strip == 0:
1706 return '', prefix + path.rstrip()
1706 return '', prefix + path.rstrip()
1707 count = strip
1707 count = strip
1708 while count > 0:
1708 while count > 0:
1709 i = path.find('/', i)
1709 i = path.find('/', i)
1710 if i == -1:
1710 if i == -1:
1711 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1711 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1712 (count, strip, path))
1712 (count, strip, path))
1713 i += 1
1713 i += 1
1714 # consume '//' in the path
1714 # consume '//' in the path
1715 while i < pathlen - 1 and path[i:i + 1] == '/':
1715 while i < pathlen - 1 and path[i:i + 1] == '/':
1716 i += 1
1716 i += 1
1717 count -= 1
1717 count -= 1
1718 return path[:i].lstrip(), prefix + path[i:].rstrip()
1718 return path[:i].lstrip(), prefix + path[i:].rstrip()
1719
1719
1720 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1720 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1721 nulla = afile_orig == "/dev/null"
1721 nulla = afile_orig == "/dev/null"
1722 nullb = bfile_orig == "/dev/null"
1722 nullb = bfile_orig == "/dev/null"
1723 create = nulla and hunk.starta == 0 and hunk.lena == 0
1723 create = nulla and hunk.starta == 0 and hunk.lena == 0
1724 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1724 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1725 abase, afile = pathtransform(afile_orig, strip, prefix)
1725 abase, afile = pathtransform(afile_orig, strip, prefix)
1726 gooda = not nulla and backend.exists(afile)
1726 gooda = not nulla and backend.exists(afile)
1727 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1727 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1728 if afile == bfile:
1728 if afile == bfile:
1729 goodb = gooda
1729 goodb = gooda
1730 else:
1730 else:
1731 goodb = not nullb and backend.exists(bfile)
1731 goodb = not nullb and backend.exists(bfile)
1732 missing = not goodb and not gooda and not create
1732 missing = not goodb and not gooda and not create
1733
1733
1734 # some diff programs apparently produce patches where the afile is
1734 # some diff programs apparently produce patches where the afile is
1735 # not /dev/null, but afile starts with bfile
1735 # not /dev/null, but afile starts with bfile
1736 abasedir = afile[:afile.rfind('/') + 1]
1736 abasedir = afile[:afile.rfind('/') + 1]
1737 bbasedir = bfile[:bfile.rfind('/') + 1]
1737 bbasedir = bfile[:bfile.rfind('/') + 1]
1738 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1738 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1739 and hunk.starta == 0 and hunk.lena == 0):
1739 and hunk.starta == 0 and hunk.lena == 0):
1740 create = True
1740 create = True
1741 missing = False
1741 missing = False
1742
1742
1743 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1743 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1744 # diff is between a file and its backup. In this case, the original
1744 # diff is between a file and its backup. In this case, the original
1745 # file should be patched (see original mpatch code).
1745 # file should be patched (see original mpatch code).
1746 isbackup = (abase == bbase and bfile.startswith(afile))
1746 isbackup = (abase == bbase and bfile.startswith(afile))
1747 fname = None
1747 fname = None
1748 if not missing:
1748 if not missing:
1749 if gooda and goodb:
1749 if gooda and goodb:
1750 if isbackup:
1750 if isbackup:
1751 fname = afile
1751 fname = afile
1752 else:
1752 else:
1753 fname = bfile
1753 fname = bfile
1754 elif gooda:
1754 elif gooda:
1755 fname = afile
1755 fname = afile
1756
1756
1757 if not fname:
1757 if not fname:
1758 if not nullb:
1758 if not nullb:
1759 if isbackup:
1759 if isbackup:
1760 fname = afile
1760 fname = afile
1761 else:
1761 else:
1762 fname = bfile
1762 fname = bfile
1763 elif not nulla:
1763 elif not nulla:
1764 fname = afile
1764 fname = afile
1765 else:
1765 else:
1766 raise PatchError(_("undefined source and destination files"))
1766 raise PatchError(_("undefined source and destination files"))
1767
1767
1768 gp = patchmeta(fname)
1768 gp = patchmeta(fname)
1769 if create:
1769 if create:
1770 gp.op = 'ADD'
1770 gp.op = 'ADD'
1771 elif remove:
1771 elif remove:
1772 gp.op = 'DELETE'
1772 gp.op = 'DELETE'
1773 return gp
1773 return gp
1774
1774
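# Editor's sketch (not part of the changeset): makepatchmeta() infers the
# operation from the /dev/null markers combined with the hunk ranges: an old
# side of /dev/null with an empty "-0,0" range means the file is being added,
# a new side of /dev/null with an empty "+0,0" range means it is being
# deleted, and anything else is treated as a plain modification. The rule in
# isolation, with hypothetical inputs and labels of my own:

def _demo_op(afile, bfile, starta, lena, startb, lenb):
    if afile == '/dev/null' and starta == 0 and lena == 0:
        return 'ADD'
    if bfile == '/dev/null' and startb == 0 and lenb == 0:
        return 'DELETE'
    return 'MODIFY'

assert _demo_op('/dev/null', 'b/new.txt', 0, 0, 1, 3) == 'ADD'
assert _demo_op('a/old.txt', '/dev/null', 1, 3, 0, 0) == 'DELETE'
assert _demo_op('a/f.txt', 'b/f.txt', 1, 3, 1, 4) == 'MODIFY'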
1775 def scanpatch(fp):
1775 def scanpatch(fp):
1776 """like patch.iterhunks, but yield different events
1776 """like patch.iterhunks, but yield different events
1777
1777
1778 - ('file', [header_lines + fromfile + tofile])
1778 - ('file', [header_lines + fromfile + tofile])
1779 - ('context', [context_lines])
1779 - ('context', [context_lines])
1780 - ('hunk', [hunk_lines])
1780 - ('hunk', [hunk_lines])
1781 - ('range', (-start,len, +start,len, proc))
1781 - ('range', (-start,len, +start,len, proc))
1782 """
1782 """
1783 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1783 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1784 lr = linereader(fp)
1784 lr = linereader(fp)
1785
1785
1786 def scanwhile(first, p):
1786 def scanwhile(first, p):
1787 """scan lr while predicate holds"""
1787 """scan lr while predicate holds"""
1788 lines = [first]
1788 lines = [first]
1789 for line in iter(lr.readline, ''):
1789 for line in iter(lr.readline, ''):
1790 if p(line):
1790 if p(line):
1791 lines.append(line)
1791 lines.append(line)
1792 else:
1792 else:
1793 lr.push(line)
1793 lr.push(line)
1794 break
1794 break
1795 return lines
1795 return lines
1796
1796
1797 for line in iter(lr.readline, ''):
1797 for line in iter(lr.readline, ''):
1798 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1798 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1799 def notheader(line):
1799 def notheader(line):
1800 s = line.split(None, 1)
1800 s = line.split(None, 1)
1801 return not s or s[0] not in ('---', 'diff')
1801 return not s or s[0] not in ('---', 'diff')
1802 header = scanwhile(line, notheader)
1802 header = scanwhile(line, notheader)
1803 fromfile = lr.readline()
1803 fromfile = lr.readline()
1804 if fromfile.startswith('---'):
1804 if fromfile.startswith('---'):
1805 tofile = lr.readline()
1805 tofile = lr.readline()
1806 header += [fromfile, tofile]
1806 header += [fromfile, tofile]
1807 else:
1807 else:
1808 lr.push(fromfile)
1808 lr.push(fromfile)
1809 yield 'file', header
1809 yield 'file', header
1810 elif line.startswith(' '):
1810 elif line.startswith(' '):
1811 cs = (' ', '\\')
1811 cs = (' ', '\\')
1812 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1812 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1813 elif line.startswith(('-', '+')):
1813 elif line.startswith(('-', '+')):
1814 cs = ('-', '+', '\\')
1814 cs = ('-', '+', '\\')
1815 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1815 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1816 else:
1816 else:
1817 m = lines_re.match(line)
1817 m = lines_re.match(line)
1818 if m:
1818 if m:
1819 yield 'range', m.groups()
1819 yield 'range', m.groups()
1820 else:
1820 else:
1821 yield 'other', line
1821 yield 'other', line
1822
1822
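# Editor's sketch (not part of the changeset): for a small git-style diff,
# scanpatch() yields one 'file' event carrying the header lines, then
# 'range', 'context' and 'hunk' events for each hunk. An illustration of the
# expected labels (paths and content are made up):

_demo_patch = stringio(
    'diff --git a/x b/x\n'
    '--- a/x\n'
    '+++ b/x\n'
    '@@ -1,2 +1,2 @@\n'
    ' keep\n'
    '-old\n'
    '+new\n'
)
assert [ev for ev, _data in scanpatch(_demo_patch)] == [
    'file', 'range', 'context', 'hunk']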
1823 def scangitpatch(lr, firstline):
1823 def scangitpatch(lr, firstline):
1824 """
1824 """
1825 Git patches can emit:
1825 Git patches can emit:
1826 - rename a to b
1826 - rename a to b
1827 - change b
1827 - change b
1828 - copy a to c
1828 - copy a to c
1829 - change c
1829 - change c
1830
1830
1831 We cannot apply this sequence as-is: the renamed 'a' could not be
1831 We cannot apply this sequence as-is: the renamed 'a' could not be
1832 found, since it would have been renamed already. And we cannot copy
1832 found, since it would have been renamed already. And we cannot copy
1833 from 'b' instead, because 'b' would have been changed already. So
1833 from 'b' instead, because 'b' would have been changed already. So
1834 we scan the git patch for copy and rename commands so we can
1834 we scan the git patch for copy and rename commands so we can
1835 perform the copies ahead of time.
1835 perform the copies ahead of time.
1836 """
1836 """
1837 pos = 0
1837 pos = 0
1838 try:
1838 try:
1839 pos = lr.fp.tell()
1839 pos = lr.fp.tell()
1840 fp = lr.fp
1840 fp = lr.fp
1841 except IOError:
1841 except IOError:
1842 fp = stringio(lr.fp.read())
1842 fp = stringio(lr.fp.read())
1843 gitlr = linereader(fp)
1843 gitlr = linereader(fp)
1844 gitlr.push(firstline)
1844 gitlr.push(firstline)
1845 gitpatches = readgitpatch(gitlr)
1845 gitpatches = readgitpatch(gitlr)
1846 fp.seek(pos)
1846 fp.seek(pos)
1847 return gitpatches
1847 return gitpatches
1848
1848
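# Editor's note (illustrative): scangitpatch() needs to look at the whole
# patch before the main parse, so it remembers the current position (or, for
# unseekable inputs such as pipes, buffers the remaining data in a stringio),
# extracts the git metadata, and then rewinds so iterhunks() can re-read the
# same bytes. The same peek-then-rewind pattern in isolation:

_demo_fp = stringio('line one\nline two\n')
_demo_pos = _demo_fp.tell()
_demo_peek = _demo_fp.read()        # consume everything to inspect it
_demo_fp.seek(_demo_pos)            # rewind for the real consumer
assert _demo_fp.readline() == 'line one\n'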
1849 def iterhunks(fp):
1849 def iterhunks(fp):
1850 """Read a patch and yield the following events:
1850 """Read a patch and yield the following events:
1851 - ("file", afile, bfile, firsthunk): select a new target file.
1851 - ("file", afile, bfile, firsthunk): select a new target file.
1852 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1852 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1853 "file" event.
1853 "file" event.
1854 - ("git", gitchanges): current diff is in git format, gitchanges
1854 - ("git", gitchanges): current diff is in git format, gitchanges
1855 maps filenames to gitpatch records. Unique event.
1855 maps filenames to gitpatch records. Unique event.
1856 """
1856 """
1857 afile = ""
1857 afile = ""
1858 bfile = ""
1858 bfile = ""
1859 state = None
1859 state = None
1860 hunknum = 0
1860 hunknum = 0
1861 emitfile = newfile = False
1861 emitfile = newfile = False
1862 gitpatches = None
1862 gitpatches = None
1863
1863
1864 # our states
1864 # our states
1865 BFILE = 1
1865 BFILE = 1
1866 context = None
1866 context = None
1867 lr = linereader(fp)
1867 lr = linereader(fp)
1868
1868
1869 for x in iter(lr.readline, ''):
1869 for x in iter(lr.readline, ''):
1870 if state == BFILE and (
1870 if state == BFILE and (
1871 (not context and x.startswith('@'))
1871 (not context and x.startswith('@'))
1872 or (context is not False and x.startswith('***************'))
1872 or (context is not False and x.startswith('***************'))
1873 or x.startswith('GIT binary patch')):
1873 or x.startswith('GIT binary patch')):
1874 gp = None
1874 gp = None
1875 if (gitpatches and
1875 if (gitpatches and
1876 gitpatches[-1].ispatching(afile, bfile)):
1876 gitpatches[-1].ispatching(afile, bfile)):
1877 gp = gitpatches.pop()
1877 gp = gitpatches.pop()
1878 if x.startswith('GIT binary patch'):
1878 if x.startswith('GIT binary patch'):
1879 h = binhunk(lr, gp.path)
1879 h = binhunk(lr, gp.path)
1880 else:
1880 else:
1881 if context is None and x.startswith('***************'):
1881 if context is None and x.startswith('***************'):
1882 context = True
1882 context = True
1883 h = hunk(x, hunknum + 1, lr, context)
1883 h = hunk(x, hunknum + 1, lr, context)
1884 hunknum += 1
1884 hunknum += 1
1885 if emitfile:
1885 if emitfile:
1886 emitfile = False
1886 emitfile = False
1887 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1887 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1888 yield 'hunk', h
1888 yield 'hunk', h
1889 elif x.startswith('diff --git a/'):
1889 elif x.startswith('diff --git a/'):
1890 m = gitre.match(x.rstrip(' \r\n'))
1890 m = gitre.match(x.rstrip(' \r\n'))
1891 if not m:
1891 if not m:
1892 continue
1892 continue
1893 if gitpatches is None:
1893 if gitpatches is None:
1894 # scan whole input for git metadata
1894 # scan whole input for git metadata
1895 gitpatches = scangitpatch(lr, x)
1895 gitpatches = scangitpatch(lr, x)
1896 yield 'git', [g.copy() for g in gitpatches
1896 yield 'git', [g.copy() for g in gitpatches
1897 if g.op in ('COPY', 'RENAME')]
1897 if g.op in ('COPY', 'RENAME')]
1898 gitpatches.reverse()
1898 gitpatches.reverse()
1899 afile = 'a/' + m.group(1)
1899 afile = 'a/' + m.group(1)
1900 bfile = 'b/' + m.group(2)
1900 bfile = 'b/' + m.group(2)
1901 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1901 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1902 gp = gitpatches.pop()
1902 gp = gitpatches.pop()
1903 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1903 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1904 if not gitpatches:
1904 if not gitpatches:
1905 raise PatchError(_('failed to synchronize metadata for "%s"')
1905 raise PatchError(_('failed to synchronize metadata for "%s"')
1906 % afile[2:])
1906 % afile[2:])
1907 newfile = True
1907 newfile = True
1908 elif x.startswith('---'):
1908 elif x.startswith('---'):
1909 # check for a unified diff
1909 # check for a unified diff
1910 l2 = lr.readline()
1910 l2 = lr.readline()
1911 if not l2.startswith('+++'):
1911 if not l2.startswith('+++'):
1912 lr.push(l2)
1912 lr.push(l2)
1913 continue
1913 continue
1914 newfile = True
1914 newfile = True
1915 context = False
1915 context = False
1916 afile = parsefilename(x)
1916 afile = parsefilename(x)
1917 bfile = parsefilename(l2)
1917 bfile = parsefilename(l2)
1918 elif x.startswith('***'):
1918 elif x.startswith('***'):
1919 # check for a context diff
1919 # check for a context diff
1920 l2 = lr.readline()
1920 l2 = lr.readline()
1921 if not l2.startswith('---'):
1921 if not l2.startswith('---'):
1922 lr.push(l2)
1922 lr.push(l2)
1923 continue
1923 continue
1924 l3 = lr.readline()
1924 l3 = lr.readline()
1925 lr.push(l3)
1925 lr.push(l3)
1926 if not l3.startswith("***************"):
1926 if not l3.startswith("***************"):
1927 lr.push(l2)
1927 lr.push(l2)
1928 continue
1928 continue
1929 newfile = True
1929 newfile = True
1930 context = True
1930 context = True
1931 afile = parsefilename(x)
1931 afile = parsefilename(x)
1932 bfile = parsefilename(l2)
1932 bfile = parsefilename(l2)
1933
1933
1934 if newfile:
1934 if newfile:
1935 newfile = False
1935 newfile = False
1936 emitfile = True
1936 emitfile = True
1937 state = BFILE
1937 state = BFILE
1938 hunknum = 0
1938 hunknum = 0
1939
1939
1940 while gitpatches:
1940 while gitpatches:
1941 gp = gitpatches.pop()
1941 gp = gitpatches.pop()
1942 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1942 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1943
1943
1944 def applybindelta(binchunk, data):
1944 def applybindelta(binchunk, data):
1945 """Apply a binary delta hunk
1945 """Apply a binary delta hunk
1946 The algorithm follows the one in git's patch-delta.c.
1946 The algorithm follows the one in git's patch-delta.c.
1947 """
1947 """
1948 def deltahead(binchunk):
1948 def deltahead(binchunk):
1949 i = 0
1949 i = 0
1950 for c in pycompat.bytestr(binchunk):
1950 for c in pycompat.bytestr(binchunk):
1951 i += 1
1951 i += 1
1952 if not (ord(c) & 0x80):
1952 if not (ord(c) & 0x80):
1953 return i
1953 return i
1954 return i
1954 return i
1955 out = ""
1955 out = ""
1956 s = deltahead(binchunk)
1956 s = deltahead(binchunk)
1957 binchunk = binchunk[s:]
1957 binchunk = binchunk[s:]
1958 s = deltahead(binchunk)
1958 s = deltahead(binchunk)
1959 binchunk = binchunk[s:]
1959 binchunk = binchunk[s:]
1960 i = 0
1960 i = 0
1961 while i < len(binchunk):
1961 while i < len(binchunk):
1962 cmd = ord(binchunk[i:i + 1])
1962 cmd = ord(binchunk[i:i + 1])
1963 i += 1
1963 i += 1
1964 if (cmd & 0x80):
1964 if (cmd & 0x80):
1965 offset = 0
1965 offset = 0
1966 size = 0
1966 size = 0
1967 if (cmd & 0x01):
1967 if (cmd & 0x01):
1968 offset = ord(binchunk[i:i + 1])
1968 offset = ord(binchunk[i:i + 1])
1969 i += 1
1969 i += 1
1970 if (cmd & 0x02):
1970 if (cmd & 0x02):
1971 offset |= ord(binchunk[i:i + 1]) << 8
1971 offset |= ord(binchunk[i:i + 1]) << 8
1972 i += 1
1972 i += 1
1973 if (cmd & 0x04):
1973 if (cmd & 0x04):
1974 offset |= ord(binchunk[i:i + 1]) << 16
1974 offset |= ord(binchunk[i:i + 1]) << 16
1975 i += 1
1975 i += 1
1976 if (cmd & 0x08):
1976 if (cmd & 0x08):
1977 offset |= ord(binchunk[i:i + 1]) << 24
1977 offset |= ord(binchunk[i:i + 1]) << 24
1978 i += 1
1978 i += 1
1979 if (cmd & 0x10):
1979 if (cmd & 0x10):
1980 size = ord(binchunk[i:i + 1])
1980 size = ord(binchunk[i:i + 1])
1981 i += 1
1981 i += 1
1982 if (cmd & 0x20):
1982 if (cmd & 0x20):
1983 size |= ord(binchunk[i:i + 1]) << 8
1983 size |= ord(binchunk[i:i + 1]) << 8
1984 i += 1
1984 i += 1
1985 if (cmd & 0x40):
1985 if (cmd & 0x40):
1986 size |= ord(binchunk[i:i + 1]) << 16
1986 size |= ord(binchunk[i:i + 1]) << 16
1987 i += 1
1987 i += 1
1988 if size == 0:
1988 if size == 0:
1989 size = 0x10000
1989 size = 0x10000
1990 offset_end = offset + size
1990 offset_end = offset + size
1991 out += data[offset:offset_end]
1991 out += data[offset:offset_end]
1992 elif cmd != 0:
1992 elif cmd != 0:
1993 offset_end = i + cmd
1993 offset_end = i + cmd
1994 out += binchunk[i:offset_end]
1994 out += binchunk[i:offset_end]
1995 i += cmd
1995 i += cmd
1996 else:
1996 else:
1997 raise PatchError(_('unexpected delta opcode 0'))
1997 raise PatchError(_('unexpected delta opcode 0'))
1998 return out
1998 return out
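A minimal, self-contained sketch of the copy/insert opcode decoding that applybindelta() performs, written in plain Python 3 rather than Mercurial's py2/py3-compat idiom. The toy delta bytes are an illustrative assumption, not taken from a real git pack:

def decode_delta(delta, source):
    """Replay a git-style binary delta against `source` (Python 3 sketch)."""
    def skip_size(pos):
        # the header stores the source and target sizes as little-endian
        # base-128 varints; the high bit marks "another byte follows"
        while delta[pos] & 0x80:
            pos += 1
        return pos + 1

    pos = skip_size(0)          # skip encoded source size
    pos = skip_size(pos)        # skip encoded target size
    out = bytearray()
    while pos < len(delta):
        cmd = delta[pos]
        pos += 1
        if cmd & 0x80:
            # copy opcode: bits 0x01..0x08 select offset bytes,
            # bits 0x10..0x40 select size bytes; a size of 0 means 0x10000
            offset = size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= delta[pos] << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= delta[pos] << shift
                    pos += 1
            out += source[offset:offset + (size or 0x10000)]
        elif cmd:
            # insert opcode: the next `cmd` bytes are literal data
            out += delta[pos:pos + cmd]
            pos += cmd
        else:
            raise ValueError('unexpected delta opcode 0')
    return bytes(out)

source = b'hello, world'
delta = bytes([12, 8,        # header: source size 12, target size 8
               0x90, 5,      # copy 5 bytes from offset 0 of the source
               3]) + b' hg'  # insert the 3 literal bytes b' hg'
assert decode_delta(delta, source) == b'hello hg'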
1999
1999
2000 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2000 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2001 """Reads a patch from fp and tries to apply it.
2001 """Reads a patch from fp and tries to apply it.
2002
2002
2003 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2003 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2004 there was any fuzz.
2004 there was any fuzz.
2005
2005
2006 If 'eolmode' is 'strict', the patch content and patched file are
2006 If 'eolmode' is 'strict', the patch content and patched file are
2007 read in binary mode. Otherwise, line endings are ignored when
2007 read in binary mode. Otherwise, line endings are ignored when
2008 patching, then normalized according to 'eolmode'.
2008 patching, then normalized according to 'eolmode'.
2009 """
2009 """
2010 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2010 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2011 prefix=prefix, eolmode=eolmode)
2011 prefix=prefix, eolmode=eolmode)
2012
2012
2013 def _canonprefix(repo, prefix):
2013 def _canonprefix(repo, prefix):
2014 if prefix:
2014 if prefix:
2015 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2015 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2016 if prefix != '':
2016 if prefix != '':
2017 prefix += '/'
2017 prefix += '/'
2018 return prefix
2018 return prefix
2019
2019
2020 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2020 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2021 eolmode='strict'):
2021 eolmode='strict'):
2022 prefix = _canonprefix(backend.repo, prefix)
2022 prefix = _canonprefix(backend.repo, prefix)
2023 def pstrip(p):
2023 def pstrip(p):
2024 return pathtransform(p, strip - 1, prefix)[1]
2024 return pathtransform(p, strip - 1, prefix)[1]
2025
2025
2026 rejects = 0
2026 rejects = 0
2027 err = 0
2027 err = 0
2028 current_file = None
2028 current_file = None
2029
2029
2030 for state, values in iterhunks(fp):
2030 for state, values in iterhunks(fp):
2031 if state == 'hunk':
2031 if state == 'hunk':
2032 if not current_file:
2032 if not current_file:
2033 continue
2033 continue
2034 ret = current_file.apply(values)
2034 ret = current_file.apply(values)
2035 if ret > 0:
2035 if ret > 0:
2036 err = 1
2036 err = 1
2037 elif state == 'file':
2037 elif state == 'file':
2038 if current_file:
2038 if current_file:
2039 rejects += current_file.close()
2039 rejects += current_file.close()
2040 current_file = None
2040 current_file = None
2041 afile, bfile, first_hunk, gp = values
2041 afile, bfile, first_hunk, gp = values
2042 if gp:
2042 if gp:
2043 gp.path = pstrip(gp.path)
2043 gp.path = pstrip(gp.path)
2044 if gp.oldpath:
2044 if gp.oldpath:
2045 gp.oldpath = pstrip(gp.oldpath)
2045 gp.oldpath = pstrip(gp.oldpath)
2046 else:
2046 else:
2047 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2047 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2048 prefix)
2048 prefix)
2049 if gp.op == 'RENAME':
2049 if gp.op == 'RENAME':
2050 backend.unlink(gp.oldpath)
2050 backend.unlink(gp.oldpath)
2051 if not first_hunk:
2051 if not first_hunk:
2052 if gp.op == 'DELETE':
2052 if gp.op == 'DELETE':
2053 backend.unlink(gp.path)
2053 backend.unlink(gp.path)
2054 continue
2054 continue
2055 data, mode = None, None
2055 data, mode = None, None
2056 if gp.op in ('RENAME', 'COPY'):
2056 if gp.op in ('RENAME', 'COPY'):
2057 data, mode = store.getfile(gp.oldpath)[:2]
2057 data, mode = store.getfile(gp.oldpath)[:2]
2058 if data is None:
2058 if data is None:
2059 # This means that the old path does not exist
2059 # This means that the old path does not exist
2060 raise PatchError(_("source file '%s' does not exist")
2060 raise PatchError(_("source file '%s' does not exist")
2061 % gp.oldpath)
2061 % gp.oldpath)
2062 if gp.mode:
2062 if gp.mode:
2063 mode = gp.mode
2063 mode = gp.mode
2064 if gp.op == 'ADD':
2064 if gp.op == 'ADD':
2065 # Added files without content have no hunk and
2065 # Added files without content have no hunk and
2066 # must be created
2066 # must be created
2067 data = ''
2067 data = ''
2068 if data or mode:
2068 if data or mode:
2069 if (gp.op in ('ADD', 'RENAME', 'COPY')
2069 if (gp.op in ('ADD', 'RENAME', 'COPY')
2070 and backend.exists(gp.path)):
2070 and backend.exists(gp.path)):
2071 raise PatchError(_("cannot create %s: destination "
2071 raise PatchError(_("cannot create %s: destination "
2072 "already exists") % gp.path)
2072 "already exists") % gp.path)
2073 backend.setfile(gp.path, data, mode, gp.oldpath)
2073 backend.setfile(gp.path, data, mode, gp.oldpath)
2074 continue
2074 continue
2075 try:
2075 try:
2076 current_file = patcher(ui, gp, backend, store,
2076 current_file = patcher(ui, gp, backend, store,
2077 eolmode=eolmode)
2077 eolmode=eolmode)
2078 except PatchError as inst:
2078 except PatchError as inst:
2079 ui.warn(str(inst) + '\n')
2079 ui.warn(str(inst) + '\n')
2080 current_file = None
2080 current_file = None
2081 rejects += 1
2081 rejects += 1
2082 continue
2082 continue
2083 elif state == 'git':
2083 elif state == 'git':
2084 for gp in values:
2084 for gp in values:
2085 path = pstrip(gp.oldpath)
2085 path = pstrip(gp.oldpath)
2086 data, mode = backend.getfile(path)
2086 data, mode = backend.getfile(path)
2087 if data is None:
2087 if data is None:
2088 # The error ignored here will trigger a getfile()
2088 # The error ignored here will trigger a getfile()
2089 # error in a place more appropriate for error
2089 # error in a place more appropriate for error
2090 # handling, and will not interrupt the patching
2090 # handling, and will not interrupt the patching
2091 # process.
2091 # process.
2092 pass
2092 pass
2093 else:
2093 else:
2094 store.setfile(path, data, mode)
2094 store.setfile(path, data, mode)
2095 else:
2095 else:
2096 raise error.Abort(_('unsupported parser state: %s') % state)
2096 raise error.Abort(_('unsupported parser state: %s') % state)
2097
2097
2098 if current_file:
2098 if current_file:
2099 rejects += current_file.close()
2099 rejects += current_file.close()
2100
2100
2101 if rejects:
2101 if rejects:
2102 return -1
2102 return -1
2103 return err
2103 return err
2104
2104
2105 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2105 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2106 similarity):
2106 similarity):
2107 """use <patcher> to apply <patchname> to the working directory.
2107 """use <patcher> to apply <patchname> to the working directory.
2108 returns whether patch was applied with fuzz factor."""
2108 returns whether patch was applied with fuzz factor."""
2109
2109
2110 fuzz = False
2110 fuzz = False
2111 args = []
2111 args = []
2112 cwd = repo.root
2112 cwd = repo.root
2113 if cwd:
2113 if cwd:
2114 args.append('-d %s' % procutil.shellquote(cwd))
2114 args.append('-d %s' % procutil.shellquote(cwd))
2115 cmd = ('%s %s -p%d < %s'
2115 cmd = ('%s %s -p%d < %s'
2116 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2116 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2117 ui.debug('Using external patch tool: %s\n' % cmd)
2117 ui.debug('Using external patch tool: %s\n' % cmd)
2118 fp = procutil.popen(cmd, 'rb')
2118 fp = procutil.popen(cmd, 'rb')
2119 try:
2119 try:
2120 for line in util.iterfile(fp):
2120 for line in util.iterfile(fp):
2121 line = line.rstrip()
2121 line = line.rstrip()
2122 ui.note(line + '\n')
2122 ui.note(line + '\n')
2123 if line.startswith('patching file '):
2123 if line.startswith('patching file '):
2124 pf = util.parsepatchoutput(line)
2124 pf = util.parsepatchoutput(line)
2125 printed_file = False
2125 printed_file = False
2126 files.add(pf)
2126 files.add(pf)
2127 elif line.find('with fuzz') >= 0:
2127 elif line.find('with fuzz') >= 0:
2128 fuzz = True
2128 fuzz = True
2129 if not printed_file:
2129 if not printed_file:
2130 ui.warn(pf + '\n')
2130 ui.warn(pf + '\n')
2131 printed_file = True
2131 printed_file = True
2132 ui.warn(line + '\n')
2132 ui.warn(line + '\n')
2133 elif line.find('saving rejects to file') >= 0:
2133 elif line.find('saving rejects to file') >= 0:
2134 ui.warn(line + '\n')
2134 ui.warn(line + '\n')
2135 elif line.find('FAILED') >= 0:
2135 elif line.find('FAILED') >= 0:
2136 if not printed_file:
2136 if not printed_file:
2137 ui.warn(pf + '\n')
2137 ui.warn(pf + '\n')
2138 printed_file = True
2138 printed_file = True
2139 ui.warn(line + '\n')
2139 ui.warn(line + '\n')
2140 finally:
2140 finally:
2141 if files:
2141 if files:
2142 scmutil.marktouched(repo, files, similarity)
2142 scmutil.marktouched(repo, files, similarity)
2143 code = fp.close()
2143 code = fp.close()
2144 if code:
2144 if code:
2145 raise PatchError(_("patch command failed: %s") %
2145 raise PatchError(_("patch command failed: %s") %
2146 procutil.explainexit(code))
2146 procutil.explainexit(code))
2147 return fuzz
2147 return fuzz
2148
2148
2149 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2149 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2150 eolmode='strict'):
2150 eolmode='strict'):
2151 if files is None:
2151 if files is None:
2152 files = set()
2152 files = set()
2153 if eolmode is None:
2153 if eolmode is None:
2154 eolmode = ui.config('patch', 'eol')
2154 eolmode = ui.config('patch', 'eol')
2155 if eolmode.lower() not in eolmodes:
2155 if eolmode.lower() not in eolmodes:
2156 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2156 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2157 eolmode = eolmode.lower()
2157 eolmode = eolmode.lower()
2158
2158
2159 store = filestore()
2159 store = filestore()
2160 try:
2160 try:
2161 fp = open(patchobj, 'rb')
2161 fp = open(patchobj, 'rb')
2162 except TypeError:
2162 except TypeError:
2163 fp = patchobj
2163 fp = patchobj
2164 try:
2164 try:
2165 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2165 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2166 eolmode=eolmode)
2166 eolmode=eolmode)
2167 finally:
2167 finally:
2168 if fp != patchobj:
2168 if fp != patchobj:
2169 fp.close()
2169 fp.close()
2170 files.update(backend.close())
2170 files.update(backend.close())
2171 store.close()
2171 store.close()
2172 if ret < 0:
2172 if ret < 0:
2173 raise PatchError(_('patch failed to apply'))
2173 raise PatchError(_('patch failed to apply'))
2174 return ret > 0
2174 return ret > 0
2175
2175
2176 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2176 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2177 eolmode='strict', similarity=0):
2177 eolmode='strict', similarity=0):
2178 """use builtin patch to apply <patchobj> to the working directory.
2178 """use builtin patch to apply <patchobj> to the working directory.
2179 returns whether patch was applied with fuzz factor."""
2179 returns whether patch was applied with fuzz factor."""
2180 backend = workingbackend(ui, repo, similarity)
2180 backend = workingbackend(ui, repo, similarity)
2181 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2181 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2182
2182
2183 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2183 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2184 eolmode='strict'):
2184 eolmode='strict'):
2185 backend = repobackend(ui, repo, ctx, store)
2185 backend = repobackend(ui, repo, ctx, store)
2186 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2186 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2187
2187
2188 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2188 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2189 similarity=0):
2189 similarity=0):
2190 """Apply <patchname> to the working directory.
2190 """Apply <patchname> to the working directory.
2191
2191
2192 'eolmode' specifies how end of lines should be handled. It can be:
2192 'eolmode' specifies how end of lines should be handled. It can be:
2193 - 'strict': inputs are read in binary mode, EOLs are preserved
2193 - 'strict': inputs are read in binary mode, EOLs are preserved
2194 - 'crlf': EOLs are ignored when patching and reset to CRLF
2194 - 'crlf': EOLs are ignored when patching and reset to CRLF
2195 - 'lf': EOLs are ignored when patching and reset to LF
2195 - 'lf': EOLs are ignored when patching and reset to LF
2196 - None: get it from user settings, default to 'strict'
2196 - None: get it from user settings, default to 'strict'
2197 'eolmode' is ignored when using an external patcher program.
2197 'eolmode' is ignored when using an external patcher program.
2198
2198
2199 Returns whether patch was applied with fuzz factor.
2199 Returns whether patch was applied with fuzz factor.
2200 """
2200 """
2201 patcher = ui.config('ui', 'patch')
2201 patcher = ui.config('ui', 'patch')
2202 if files is None:
2202 if files is None:
2203 files = set()
2203 files = set()
2204 if patcher:
2204 if patcher:
2205 return _externalpatch(ui, repo, patcher, patchname, strip,
2205 return _externalpatch(ui, repo, patcher, patchname, strip,
2206 files, similarity)
2206 files, similarity)
2207 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2207 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2208 similarity)
2208 similarity)
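A hedged usage sketch for patch() above, written as an external caller; the repository path and patch file are assumptions for illustration, and in-tree callers (such as the import command) already receive `ui` and `repo` from the command infrastructure. Note that these APIs take byte strings and raise PatchError when the patch does not apply cleanly:

from mercurial import hg, ui as uimod
from mercurial import patch as patchmod

ui = uimod.ui.load()
repo = hg.repository(ui, b'/path/to/repo')           # hypothetical repo
touched = set()
fuzz = patchmod.patch(ui, repo, b'/tmp/change.diff', strip=1,
                      files=touched, eolmode=None)    # None: honor patch.eol
if fuzz:
    ui.warn(b'patch applied with fuzz\n')
ui.write(b'touched files: %s\n' % b', '.join(sorted(touched)))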
2209
2209
2210 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2210 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2211 backend = fsbackend(ui, repo.root)
2211 backend = fsbackend(ui, repo.root)
2212 prefix = _canonprefix(repo, prefix)
2212 prefix = _canonprefix(repo, prefix)
2213 with open(patchpath, 'rb') as fp:
2213 with open(patchpath, 'rb') as fp:
2214 changed = set()
2214 changed = set()
2215 for state, values in iterhunks(fp):
2215 for state, values in iterhunks(fp):
2216 if state == 'file':
2216 if state == 'file':
2217 afile, bfile, first_hunk, gp = values
2217 afile, bfile, first_hunk, gp = values
2218 if gp:
2218 if gp:
2219 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2219 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2220 if gp.oldpath:
2220 if gp.oldpath:
2221 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2221 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2222 prefix)[1]
2222 prefix)[1]
2223 else:
2223 else:
2224 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2224 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2225 prefix)
2225 prefix)
2226 changed.add(gp.path)
2226 changed.add(gp.path)
2227 if gp.op == 'RENAME':
2227 if gp.op == 'RENAME':
2228 changed.add(gp.oldpath)
2228 changed.add(gp.oldpath)
2229 elif state not in ('hunk', 'git'):
2229 elif state not in ('hunk', 'git'):
2230 raise error.Abort(_('unsupported parser state: %s') % state)
2230 raise error.Abort(_('unsupported parser state: %s') % state)
2231 return changed
2231 return changed
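A companion sketch, reusing the assumed `ui`, `repo`, and `patchmod` import from the previous example: changedfiles() parses the same patch but only reports which repository-relative paths it would touch, without applying anything (renames contribute both the old and the new name).

paths = patchmod.changedfiles(ui, repo, b'/tmp/change.diff', strip=1)
ui.write(b'would touch: %s\n' % b', '.join(sorted(paths)))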
2232
2232
2233 class GitDiffRequired(Exception):
2233 class GitDiffRequired(Exception):
2234 pass
2234 pass
2235
2235
2236 diffopts = diffutil.diffallopts
2236 diffopts = diffutil.diffallopts
2237 diffallopts = diffutil.diffallopts
2237 diffallopts = diffutil.diffallopts
2238 difffeatureopts = diffutil.difffeatureopts
2238 difffeatureopts = diffutil.difffeatureopts
2239
2239
2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2241 opts=None, losedatafn=None, pathfn=None, copy=None,
2241 opts=None, losedatafn=None, pathfn=None, copy=None,
2242 copysourcematch=None, hunksfilterfn=None):
2242 copysourcematch=None, hunksfilterfn=None):
2243 '''yields diff of changes to files between two nodes, or node and
2243 '''yields diff of changes to files between two nodes, or node and
2244 working directory.
2244 working directory.
2245
2245
2246 if node1 is None, use first dirstate parent instead.
2246 if node1 is None, use first dirstate parent instead.
2247 if node2 is None, compare node1 with working directory.
2247 if node2 is None, compare node1 with working directory.
2248
2248
2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2250 every time some change cannot be represented with the current
2250 every time some change cannot be represented with the current
2251 patch format. Return False to upgrade to git patch format, True to
2251 patch format. Return False to upgrade to git patch format, True to
2252 accept the loss or raise an exception to abort the diff. It is
2252 accept the loss or raise an exception to abort the diff. It is
2253 called with the name of the current file being diffed as 'fn'. If set
2253 called with the name of the current file being diffed as 'fn'. If set
2254 to None, patches will always be upgraded to git format when
2254 to None, patches will always be upgraded to git format when
2255 necessary.
2255 necessary.
2256
2256
2257 prefix is a filename prefix that is prepended to all filenames on
2257 prefix is a filename prefix that is prepended to all filenames on
2258 display (used for subrepos).
2258 display (used for subrepos).
2259
2259
2260 relroot, if not empty, must be normalized with a trailing /. Any match
2260 relroot, if not empty, must be normalized with a trailing /. Any match
2261 patterns that fall outside it will be ignored.
2261 patterns that fall outside it will be ignored.
2262
2262
2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2264 information.
2264 information.
2265
2265
2266 if copysourcematch is not None, then copy sources will be filtered by this
2266 if copysourcematch is not None, then copy sources will be filtered by this
2267 matcher
2267 matcher
2268
2268
2269 hunksfilterfn, if not None, should be a function taking a filectx and
2269 hunksfilterfn, if not None, should be a function taking a filectx and
2270 hunks generator that may yield filtered hunks.
2270 hunks generator that may yield filtered hunks.
2271 '''
2271 '''
2272 if not node1 and not node2:
2272 if not node1 and not node2:
2273 node1 = repo.dirstate.p1()
2273 node1 = repo.dirstate.p1()
2274
2274
2275 ctx1 = repo[node1]
2275 ctx1 = repo[node1]
2276 ctx2 = repo[node2]
2276 ctx2 = repo[node2]
2277
2277
2278 for fctx1, fctx2, hdr, hunks in diffhunks(
2278 for fctx1, fctx2, hdr, hunks in diffhunks(
2279 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
2279 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
2280 losedatafn=losedatafn, pathfn=pathfn, copy=copy,
2280 losedatafn=losedatafn, pathfn=pathfn, copy=copy,
2281 copysourcematch=copysourcematch):
2281 copysourcematch=copysourcematch):
2282 if hunksfilterfn is not None:
2282 if hunksfilterfn is not None:
2283 # If the file has been removed, fctx2 is None; but this should
2283 # If the file has been removed, fctx2 is None; but this should
2284 # not occur here since we catch removed files early in
2284 # not occur here since we catch removed files early in
2285 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2285 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2286 assert fctx2 is not None, (
2286 assert fctx2 is not None, (
2287 'fctx2 unexpectedly None in diff hunks filtering')
2287 'fctx2 unexpectedly None in diff hunks filtering')
2288 hunks = hunksfilterfn(fctx2, hunks)
2288 hunks = hunksfilterfn(fctx2, hunks)
2289 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2289 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2290 if hdr and (text or len(hdr) > 1):
2290 if hdr and (text or len(hdr) > 1):
2291 yield '\n'.join(hdr) + '\n'
2291 yield '\n'.join(hdr) + '\n'
2292 if text:
2292 if text:
2293 yield text
2293 yield text
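A hedged sketch of consuming the diff() generator above, reusing the assumed `repo` from the earlier examples: it yields header and hunk text as byte-string chunks that callers typically concatenate or stream to the ui. Here it renders, in git style, the diff introduced by the working-copy parent commit:

opts = diffallopts(repo.ui, {'git': True})
ctx = repo[b'.']
chunks = diff(repo, node1=ctx.p1().node(), node2=ctx.node(), opts=opts)
repo.ui.write(b''.join(chunks))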
2294
2294
2295 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
2295 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
2296 losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
2296 losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
2297 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2297 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2298 where `header` is a list of diff headers and `hunks` is an iterable of
2298 where `header` is a list of diff headers and `hunks` is an iterable of
2299 (`hunkrange`, `hunklines`) tuples.
2299 (`hunkrange`, `hunklines`) tuples.
2300
2300
2301 See diff() for the meaning of parameters.
2301 See diff() for the meaning of parameters.
2302 """
2302 """
2303
2303
2304 if opts is None:
2304 if opts is None:
2305 opts = mdiff.defaultopts
2305 opts = mdiff.defaultopts
2306
2306
2307 def lrugetfilectx():
2307 def lrugetfilectx():
2308 cache = {}
2308 cache = {}
2309 order = collections.deque()
2309 order = collections.deque()
2310 def getfilectx(f, ctx):
2310 def getfilectx(f, ctx):
2311 fctx = ctx.filectx(f, filelog=cache.get(f))
2311 fctx = ctx.filectx(f, filelog=cache.get(f))
2312 if f not in cache:
2312 if f not in cache:
2313 if len(cache) > 20:
2313 if len(cache) > 20:
2314 del cache[order.popleft()]
2314 del cache[order.popleft()]
2315 cache[f] = fctx.filelog()
2315 cache[f] = fctx.filelog()
2316 else:
2316 else:
2317 order.remove(f)
2317 order.remove(f)
2318 order.append(f)
2318 order.append(f)
2319 return fctx
2319 return fctx
2320 return getfilectx
2320 return getfilectx
2321 getfilectx = lrugetfilectx()
2321 getfilectx = lrugetfilectx()
2322
2322
2323 if not changes:
2323 if not changes:
2324 changes = ctx1.status(ctx2, match=match)
2324 changes = ctx1.status(ctx2, match=match)
2325 modified, added, removed = changes[:3]
2325 modified, added, removed = changes[:3]
2326
2326
2327 if not modified and not added and not removed:
2327 if not modified and not added and not removed:
2328 return []
2328 return []
2329
2329
2330 if repo.ui.debugflag:
2330 if repo.ui.debugflag:
2331 hexfunc = hex
2331 hexfunc = hex
2332 else:
2332 else:
2333 hexfunc = short
2333 hexfunc = short
2334 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2334 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2335
2335
2336 if copy is None:
2336 if copy is None:
2337 copy = {}
2337 copy = {}
2338 if opts.git or opts.upgrade:
2338 if opts.git or opts.upgrade:
2339 copy = copies.pathcopies(ctx1, ctx2, match=match)
2339 copy = copies.pathcopies(ctx1, ctx2, match=match)
2340
2340
2341 if copysourcematch:
2341 if copysourcematch:
2342 # filter out copies where source side isn't inside the matcher
2342 # filter out copies where source side isn't inside the matcher
2343 # (copies.pathcopies() already filtered out the destination)
2343 # (copies.pathcopies() already filtered out the destination)
2344 copy = {dst: src for dst, src in copy.iteritems()
2344 copy = {dst: src for dst, src in copy.iteritems()
2345 if copysourcematch(src)}
2345 if copysourcematch(src)}
2346
2346
2347 modifiedset = set(modified)
2347 modifiedset = set(modified)
2348 addedset = set(added)
2348 addedset = set(added)
2349 removedset = set(removed)
2349 removedset = set(removed)
2350 for f in modified:
2350 for f in modified:
2351 if f not in ctx1:
2351 if f not in ctx1:
2352 # Fix up added, since merged-in additions appear as
2352 # Fix up added, since merged-in additions appear as
2353 # modifications during merges
2353 # modifications during merges
2354 modifiedset.remove(f)
2354 modifiedset.remove(f)
2355 addedset.add(f)
2355 addedset.add(f)
2356 for f in removed:
2356 for f in removed:
2357 if f not in ctx1:
2357 if f not in ctx1:
2358 # Merged-in additions that are then removed are reported as removed.
2358 # Merged-in additions that are then removed are reported as removed.
2359 # They are not in ctx1, so we don't want to show them in the diff.
2359 # They are not in ctx1, so we don't want to show them in the diff.
2360 removedset.remove(f)
2360 removedset.remove(f)
2361 modified = sorted(modifiedset)
2361 modified = sorted(modifiedset)
2362 added = sorted(addedset)
2362 added = sorted(addedset)
2363 removed = sorted(removedset)
2363 removed = sorted(removedset)
2364 for dst, src in list(copy.items()):
2364 for dst, src in list(copy.items()):
2365 if src not in ctx1:
2365 if src not in ctx1:
2366 # Files merged in during a merge and then copied/renamed are
2366 # Files merged in during a merge and then copied/renamed are
2367 # reported as copies. We want to show them in the diff as additions.
2367 # reported as copies. We want to show them in the diff as additions.
2368 del copy[dst]
2368 del copy[dst]
2369
2369
2370 prefetchmatch = scmutil.matchfiles(
2370 prefetchmatch = scmutil.matchfiles(
2371 repo, list(modifiedset | addedset | removedset))
2371 repo, list(modifiedset | addedset | removedset))
2372 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2372 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2373
2373
2374 def difffn(opts, losedata):
2374 def difffn(opts, losedata):
2375 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2375 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2376 copy, getfilectx, opts, losedata, pathfn)
2376 copy, getfilectx, opts, losedata, pathfn)
2377 if opts.upgrade and not opts.git:
2377 if opts.upgrade and not opts.git:
2378 try:
2378 try:
2379 def losedata(fn):
2379 def losedata(fn):
2380 if not losedatafn or not losedatafn(fn=fn):
2380 if not losedatafn or not losedatafn(fn=fn):
2381 raise GitDiffRequired
2381 raise GitDiffRequired
2382 # Buffer the whole output until we are sure it can be generated
2382 # Buffer the whole output until we are sure it can be generated
2383 return list(difffn(opts.copy(git=False), losedata))
2383 return list(difffn(opts.copy(git=False), losedata))
2384 except GitDiffRequired:
2384 except GitDiffRequired:
2385 return difffn(opts.copy(git=True), None)
2385 return difffn(opts.copy(git=True), None)
2386 else:
2386 else:
2387 return difffn(opts, None)
2387 return difffn(opts, None)
2388
2388
2389 def diffsinglehunk(hunklines):
2389 def diffsinglehunk(hunklines):
2390 """yield tokens for a list of lines in a single hunk"""
2390 """yield tokens for a list of lines in a single hunk"""
2391 for line in hunklines:
2391 for line in hunklines:
2392 # chomp
2392 # chomp
2393 chompline = line.rstrip('\r\n')
2393 chompline = line.rstrip('\r\n')
2394 # highlight tabs and trailing whitespace
2394 # highlight tabs and trailing whitespace
2395 stripline = chompline.rstrip()
2395 stripline = chompline.rstrip()
2396 if line.startswith('-'):
2396 if line.startswith('-'):
2397 label = 'diff.deleted'
2397 label = 'diff.deleted'
2398 elif line.startswith('+'):
2398 elif line.startswith('+'):
2399 label = 'diff.inserted'
2399 label = 'diff.inserted'
2400 else:
2400 else:
2401 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2401 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2402 for token in tabsplitter.findall(stripline):
2402 for token in tabsplitter.findall(stripline):
2403 if token.startswith('\t'):
2403 if token.startswith('\t'):
2404 yield (token, 'diff.tab')
2404 yield (token, 'diff.tab')
2405 else:
2405 else:
2406 yield (token, label)
2406 yield (token, label)
2407
2407
2408 if chompline != stripline:
2408 if chompline != stripline:
2409 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2409 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2410 if chompline != line:
2410 if chompline != line:
2411 yield (line[len(chompline):], '')
2411 yield (line[len(chompline):], '')
2412
2412
2413 def diffsinglehunkinline(hunklines):
2413 def diffsinglehunkinline(hunklines):
2414 """yield tokens for a list of lines in a single hunk, with inline colors"""
2414 """yield tokens for a list of lines in a single hunk, with inline colors"""
2415 # prepare deleted, and inserted content
2415 # prepare deleted, and inserted content
2416 a = ''
2416 a = ''
2417 b = ''
2417 b = ''
2418 for line in hunklines:
2418 for line in hunklines:
2419 if line[0:1] == '-':
2419 if line[0:1] == '-':
2420 a += line[1:]
2420 a += line[1:]
2421 elif line[0:1] == '+':
2421 elif line[0:1] == '+':
2422 b += line[1:]
2422 b += line[1:]
2423 else:
2423 else:
2424 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2424 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2425 # fast path: if either side is empty, use diffsinglehunk
2425 # fast path: if either side is empty, use diffsinglehunk
2426 if not a or not b:
2426 if not a or not b:
2427 for t in diffsinglehunk(hunklines):
2427 for t in diffsinglehunk(hunklines):
2428 yield t
2428 yield t
2429 return
2429 return
2430 # re-split the content into words
2430 # re-split the content into words
2431 al = wordsplitter.findall(a)
2431 al = wordsplitter.findall(a)
2432 bl = wordsplitter.findall(b)
2432 bl = wordsplitter.findall(b)
2433 # re-arrange the words to lines since the diff algorithm is line-based
2433 # re-arrange the words to lines since the diff algorithm is line-based
2434 aln = [s if s == '\n' else s + '\n' for s in al]
2434 aln = [s if s == '\n' else s + '\n' for s in al]
2435 bln = [s if s == '\n' else s + '\n' for s in bl]
2435 bln = [s if s == '\n' else s + '\n' for s in bl]
2436 an = ''.join(aln)
2436 an = ''.join(aln)
2437 bn = ''.join(bln)
2437 bn = ''.join(bln)
2438 # run the diff algorithm, prepare atokens and btokens
2438 # run the diff algorithm, prepare atokens and btokens
2439 atokens = []
2439 atokens = []
2440 btokens = []
2440 btokens = []
2441 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2441 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2442 for (a1, a2, b1, b2), btype in blocks:
2442 for (a1, a2, b1, b2), btype in blocks:
2443 changed = btype == '!'
2443 changed = btype == '!'
2444 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2444 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2445 atokens.append((changed, token))
2445 atokens.append((changed, token))
2446 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2446 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2447 btokens.append((changed, token))
2447 btokens.append((changed, token))
2448
2448
2449 # yield deleted tokens, then inserted ones
2449 # yield deleted tokens, then inserted ones
2450 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2450 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2451 ('+', 'diff.inserted', btokens)]:
2451 ('+', 'diff.inserted', btokens)]:
2452 nextisnewline = True
2452 nextisnewline = True
2453 for changed, token in tokens:
2453 for changed, token in tokens:
2454 if nextisnewline:
2454 if nextisnewline:
2455 yield (prefix, label)
2455 yield (prefix, label)
2456 nextisnewline = False
2456 nextisnewline = False
2457 # special handling line end
2457 # special handling line end
2458 isendofline = token.endswith('\n')
2458 isendofline = token.endswith('\n')
2459 if isendofline:
2459 if isendofline:
2460 chomp = token[:-1] # chomp
2460 chomp = token[:-1] # chomp
2461 if chomp.endswith('\r'):
2461 if chomp.endswith('\r'):
2462 chomp = chomp[:-1]
2462 chomp = chomp[:-1]
2463 endofline = token[len(chomp):]
2463 endofline = token[len(chomp):]
2464 token = chomp.rstrip() # detect spaces at the end
2464 token = chomp.rstrip() # detect spaces at the end
2465 endspaces = chomp[len(token):]
2465 endspaces = chomp[len(token):]
2466 # scan tabs
2466 # scan tabs
2467 for maybetab in tabsplitter.findall(token):
2467 for maybetab in tabsplitter.findall(token):
2468 if b'\t' == maybetab[0:1]:
2468 if b'\t' == maybetab[0:1]:
2469 currentlabel = 'diff.tab'
2469 currentlabel = 'diff.tab'
2470 else:
2470 else:
2471 if changed:
2471 if changed:
2472 currentlabel = label + '.changed'
2472 currentlabel = label + '.changed'
2473 else:
2473 else:
2474 currentlabel = label + '.unchanged'
2474 currentlabel = label + '.unchanged'
2475 yield (maybetab, currentlabel)
2475 yield (maybetab, currentlabel)
2476 if isendofline:
2476 if isendofline:
2477 if endspaces:
2477 if endspaces:
2478 yield (endspaces, 'diff.trailingwhitespace')
2478 yield (endspaces, 'diff.trailingwhitespace')
2479 yield (endofline, '')
2479 yield (endofline, '')
2480 nextisnewline = True
2480 nextisnewline = True
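The inline word diff above relies on a simple trick: re-split each side of the hunk into words, put every word on its own line, and reuse the line-based diff engine so that only words inside changed blocks get the '.changed' label. A standalone sketch of the same idea, using difflib instead of mdiff (an assumption made purely for illustration):

import difflib
import re

WORD = re.compile(r'(\t+| +|[A-Za-z0-9_]+|[^ \tA-Za-z0-9_])')

def changed_words(a, b):
    """Return (deleted_words, inserted_words) between two hunk sides."""
    aw, bw = WORD.findall(a), WORD.findall(b)
    sm = difflib.SequenceMatcher(None, aw, bw)
    deleted, inserted = [], []
    for tag, a1, a2, b1, b2 in sm.get_opcodes():
        if tag in ('replace', 'delete'):
            deleted.extend(aw[a1:a2])
        if tag in ('replace', 'insert'):
            inserted.extend(bw[b1:b2])
    return deleted, inserted

print(changed_words('return fctx.data()\n', 'return fctx.size()\n'))
# -> (['data'], ['size'])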
2481
2481
2482 def difflabel(func, *args, **kw):
2482 def difflabel(func, *args, **kw):
2483 '''yields 2-tuples of (output, label) based on the output of func()'''
2483 '''yields 2-tuples of (output, label) based on the output of func()'''
2484 if kw.get(r'opts') and kw[r'opts'].worddiff:
2484 if kw.get(r'opts') and kw[r'opts'].worddiff:
2485 dodiffhunk = diffsinglehunkinline
2485 dodiffhunk = diffsinglehunkinline
2486 else:
2486 else:
2487 dodiffhunk = diffsinglehunk
2487 dodiffhunk = diffsinglehunk
2488 headprefixes = [('diff', 'diff.diffline'),
2488 headprefixes = [('diff', 'diff.diffline'),
2489 ('copy', 'diff.extended'),
2489 ('copy', 'diff.extended'),
2490 ('rename', 'diff.extended'),
2490 ('rename', 'diff.extended'),
2491 ('old', 'diff.extended'),
2491 ('old', 'diff.extended'),
2492 ('new', 'diff.extended'),
2492 ('new', 'diff.extended'),
2493 ('deleted', 'diff.extended'),
2493 ('deleted', 'diff.extended'),
2494 ('index', 'diff.extended'),
2494 ('index', 'diff.extended'),
2495 ('similarity', 'diff.extended'),
2495 ('similarity', 'diff.extended'),
2496 ('---', 'diff.file_a'),
2496 ('---', 'diff.file_a'),
2497 ('+++', 'diff.file_b')]
2497 ('+++', 'diff.file_b')]
2498 textprefixes = [('@', 'diff.hunk'),
2498 textprefixes = [('@', 'diff.hunk'),
2499 # - and + are handled by diffsinglehunk
2499 # - and + are handled by diffsinglehunk
2500 ]
2500 ]
2501 head = False
2501 head = False
2502
2502
2503 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2503 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2504 hunkbuffer = []
2504 hunkbuffer = []
2505 def consumehunkbuffer():
2505 def consumehunkbuffer():
2506 if hunkbuffer:
2506 if hunkbuffer:
2507 for token in dodiffhunk(hunkbuffer):
2507 for token in dodiffhunk(hunkbuffer):
2508 yield token
2508 yield token
2509 hunkbuffer[:] = []
2509 hunkbuffer[:] = []
2510
2510
2511 for chunk in func(*args, **kw):
2511 for chunk in func(*args, **kw):
2512 lines = chunk.split('\n')
2512 lines = chunk.split('\n')
2513 linecount = len(lines)
2513 linecount = len(lines)
2514 for i, line in enumerate(lines):
2514 for i, line in enumerate(lines):
2515 if head:
2515 if head:
2516 if line.startswith('@'):
2516 if line.startswith('@'):
2517 head = False
2517 head = False
2518 else:
2518 else:
2519 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2519 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2520 head = True
2520 head = True
2521 diffline = False
2521 diffline = False
2522 if not head and line and line.startswith(('+', '-')):
2522 if not head and line and line.startswith(('+', '-')):
2523 diffline = True
2523 diffline = True
2524
2524
2525 prefixes = textprefixes
2525 prefixes = textprefixes
2526 if head:
2526 if head:
2527 prefixes = headprefixes
2527 prefixes = headprefixes
2528 if diffline:
2528 if diffline:
2529 # buffered
2529 # buffered
2530 bufferedline = line
2530 bufferedline = line
2531 if i + 1 < linecount:
2531 if i + 1 < linecount:
2532 bufferedline += "\n"
2532 bufferedline += "\n"
2533 hunkbuffer.append(bufferedline)
2533 hunkbuffer.append(bufferedline)
2534 else:
2534 else:
2535 # unbuffered
2535 # unbuffered
2536 for token in consumehunkbuffer():
2536 for token in consumehunkbuffer():
2537 yield token
2537 yield token
2538 stripline = line.rstrip()
2538 stripline = line.rstrip()
2539 for prefix, label in prefixes:
2539 for prefix, label in prefixes:
2540 if stripline.startswith(prefix):
2540 if stripline.startswith(prefix):
2541 yield (stripline, label)
2541 yield (stripline, label)
2542 if line != stripline:
2542 if line != stripline:
2543 yield (line[len(stripline):],
2543 yield (line[len(stripline):],
2544 'diff.trailingwhitespace')
2544 'diff.trailingwhitespace')
2545 break
2545 break
2546 else:
2546 else:
2547 yield (line, '')
2547 yield (line, '')
2548 if i + 1 < linecount:
2548 if i + 1 < linecount:
2549 yield ('\n', '')
2549 yield ('\n', '')
2550 for token in consumehunkbuffer():
2550 for token in consumehunkbuffer():
2551 yield token
2551 yield token
2552
2552
2553 def diffui(*args, **kw):
2553 def diffui(*args, **kw):
2554 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2554 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2555 return difflabel(diff, *args, **kw)
2555 return difflabel(diff, *args, **kw)
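A short hedged sketch of what diffui()'s (output, label) pairs are for, again reusing the assumed `repo` and `opts` from the sketches above: ui.write() accepts a label keyword, so the pairs can be streamed straight through for colorized output.

ctx = repo[b'.']
for chunk, label in diffui(repo, node1=ctx.p1().node(), node2=ctx.node(),
                           opts=opts):
    repo.ui.write(chunk, label=label)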
2556
2556
2557 def _filepairs(modified, added, removed, copy, opts):
2557 def _filepairs(modified, added, removed, copy, opts):
2558 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2558 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2559 before and f2 is the name after. For added files, f1 will be None,
2559 before and f2 is the name after. For added files, f1 will be None,
2560 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2560 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2561 or 'rename' (the latter two only if opts.git is set).'''
2561 or 'rename' (the latter two only if opts.git is set).'''
2562 gone = set()
2562 gone = set()
2563
2563
2564 copyto = dict([(v, k) for k, v in copy.items()])
2564 copyto = dict([(v, k) for k, v in copy.items()])
2565
2565
2566 addedset, removedset = set(added), set(removed)
2566 addedset, removedset = set(added), set(removed)
2567
2567
2568 for f in sorted(modified + added + removed):
2568 for f in sorted(modified + added + removed):
2569 copyop = None
2569 copyop = None
2570 f1, f2 = f, f
2570 f1, f2 = f, f
2571 if f in addedset:
2571 if f in addedset:
2572 f1 = None
2572 f1 = None
2573 if f in copy:
2573 if f in copy:
2574 if opts.git:
2574 if opts.git:
2575 f1 = copy[f]
2575 f1 = copy[f]
2576 if f1 in removedset and f1 not in gone:
2576 if f1 in removedset and f1 not in gone:
2577 copyop = 'rename'
2577 copyop = 'rename'
2578 gone.add(f1)
2578 gone.add(f1)
2579 else:
2579 else:
2580 copyop = 'copy'
2580 copyop = 'copy'
2581 elif f in removedset:
2581 elif f in removedset:
2582 f2 = None
2582 f2 = None
2583 if opts.git:
2583 if opts.git:
2584 # have we already reported a copy above?
2584 # have we already reported a copy above?
2585 if (f in copyto and copyto[f] in addedset
2585 if (f in copyto and copyto[f] in addedset
2586 and copy[copyto[f]] == f):
2586 and copy[copyto[f]] == f):
2587 continue
2587 continue
2588 yield f1, f2, copyop
2588 yield f1, f2, copyop
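A hedged illustration of the pairing logic above: with git diffs enabled, a file recorded as copied from a source that was removed in the same change is reported once, as a rename. The tiny opts stand-in and the plain (non-byte) strings are assumptions; real callers pass mdiff diff options and byte-string paths.

class fakeopts(object):
    git = True        # the only attribute _filepairs actually reads

pairs = list(_filepairs(['a.txt'],                    # modified
                        ['new.txt'],                  # added
                        ['old.txt'],                  # removed
                        {'new.txt': 'old.txt'},       # copy: dst -> src
                        fakeopts()))
# -> [('a.txt', 'a.txt', None),          # plain modification
#     ('old.txt', 'new.txt', 'rename')]  # old.txt folded into the rename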
2589
2589
2590 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2590 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2591 copy, getfilectx, opts, losedatafn, pathfn):
2591 copy, getfilectx, opts, losedatafn, pathfn):
2592 '''given input data, generate a diff and yield it in blocks
2592 '''given input data, generate a diff and yield it in blocks
2593
2593
2594 If generating a diff would lose data like flags or binary data and
2594 If generating a diff would lose data like flags or binary data and
2595 losedatafn is not None, it will be called.
2595 losedatafn is not None, it will be called.
2596
2596
2597 pathfn is applied to every path in the diff output.
2597 pathfn is applied to every path in the diff output.
2598 '''
2598 '''
2599
2599
2600 def gitindex(text):
2600 def gitindex(text):
2601 if not text:
2601 if not text:
2602 text = ""
2602 text = ""
2603 l = len(text)
2603 l = len(text)
2604 s = hashlib.sha1('blob %d\0' % l)
2604 s = hashlib.sha1('blob %d\0' % l)
2605 s.update(text)
2605 s.update(text)
2606 return hex(s.digest())
2606 return hex(s.digest())
2607
2607
2608 if opts.noprefix:
2608 if opts.noprefix:
2609 aprefix = bprefix = ''
2609 aprefix = bprefix = ''
2610 else:
2610 else:
2611 aprefix = 'a/'
2611 aprefix = 'a/'
2612 bprefix = 'b/'
2612 bprefix = 'b/'
2613
2613
2614 def diffline(f, revs):
2614 def diffline(f, revs):
2615 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2615 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2616 return 'diff %s %s' % (revinfo, f)
2616 return 'diff %s %s' % (revinfo, f)
2617
2617
2618 def isempty(fctx):
2618 def isempty(fctx):
2619 return fctx is None or fctx.size() == 0
2619 return fctx is None or fctx.size() == 0
2620
2620
2621 date1 = dateutil.datestr(ctx1.date())
2621 date1 = dateutil.datestr(ctx1.date())
2622 date2 = dateutil.datestr(ctx2.date())
2622 date2 = dateutil.datestr(ctx2.date())
2623
2623
2624 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2624 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2625
2625
2626 if not pathfn:
2626 if not pathfn:
2627 pathfn = lambda f: f
2627 pathfn = lambda f: f
2628
2628
2629 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2629 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2630 content1 = None
2630 content1 = None
2631 content2 = None
2631 content2 = None
2632 fctx1 = None
2632 fctx1 = None
2633 fctx2 = None
2633 fctx2 = None
2634 flag1 = None
2634 flag1 = None
2635 flag2 = None
2635 flag2 = None
2636 if f1:
2636 if f1:
2637 fctx1 = getfilectx(f1, ctx1)
2637 fctx1 = getfilectx(f1, ctx1)
2638 if opts.git or losedatafn:
2638 if opts.git or losedatafn:
2639 flag1 = ctx1.flags(f1)
2639 flag1 = ctx1.flags(f1)
2640 if f2:
2640 if f2:
2641 fctx2 = getfilectx(f2, ctx2)
2641 fctx2 = getfilectx(f2, ctx2)
2642 if opts.git or losedatafn:
2642 if opts.git or losedatafn:
2643 flag2 = ctx2.flags(f2)
2643 flag2 = ctx2.flags(f2)
2644 # if binary is True, output "summary" or "base85", but not "text diff"
2644 # if binary is True, output "summary" or "base85", but not "text diff"
2645 if opts.text:
2645 if opts.text:
2646 binary = False
2646 binary = False
2647 else:
2647 else:
2648 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2648 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2649
2649
2650 if losedatafn and not opts.git:
2650 if losedatafn and not opts.git:
2651 if (binary or
2651 if (binary or
2652 # copy/rename
2652 # copy/rename
2653 f2 in copy or
2653 f2 in copy or
2654 # empty file creation
2654 # empty file creation
2655 (not f1 and isempty(fctx2)) or
2655 (not f1 and isempty(fctx2)) or
2656 # empty file deletion
2656 # empty file deletion
2657 (isempty(fctx1) and not f2) or
2657 (isempty(fctx1) and not f2) or
2658 # create with flags
2658 # create with flags
2659 (not f1 and flag2) or
2659 (not f1 and flag2) or
2660 # change flags
2660 # change flags
2661 (f1 and f2 and flag1 != flag2)):
2661 (f1 and f2 and flag1 != flag2)):
2662 losedatafn(f2 or f1)
2662 losedatafn(f2 or f1)
2663
2663
2664 path1 = pathfn(f1 or f2)
2664 path1 = pathfn(f1 or f2)
2665 path2 = pathfn(f2 or f1)
2665 path2 = pathfn(f2 or f1)
2666 header = []
2666 header = []
2667 if opts.git:
2667 if opts.git:
2668 header.append('diff --git %s%s %s%s' %
2668 header.append('diff --git %s%s %s%s' %
2669 (aprefix, path1, bprefix, path2))
2669 (aprefix, path1, bprefix, path2))
2670 if not f1: # added
2670 if not f1: # added
2671 header.append('new file mode %s' % gitmode[flag2])
2671 header.append('new file mode %s' % gitmode[flag2])
2672 elif not f2: # removed
2672 elif not f2: # removed
2673 header.append('deleted file mode %s' % gitmode[flag1])
2673 header.append('deleted file mode %s' % gitmode[flag1])
2674 else: # modified/copied/renamed
2674 else: # modified/copied/renamed
2675 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2675 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2676 if mode1 != mode2:
2676 if mode1 != mode2:
2677 header.append('old mode %s' % mode1)
2677 header.append('old mode %s' % mode1)
2678 header.append('new mode %s' % mode2)
2678 header.append('new mode %s' % mode2)
2679 if copyop is not None:
2679 if copyop is not None:
2680 if opts.showsimilarity:
2680 if opts.showsimilarity:
2681 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2681 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2682 header.append('similarity index %d%%' % sim)
2682 header.append('similarity index %d%%' % sim)
2683 header.append('%s from %s' % (copyop, path1))
2683 header.append('%s from %s' % (copyop, path1))
2684 header.append('%s to %s' % (copyop, path2))
2684 header.append('%s to %s' % (copyop, path2))
2685 elif revs:
2685 elif revs:
2686 header.append(diffline(path1, revs))
2686 header.append(diffline(path1, revs))
2687
2687
2688 # fctx.is | diffopts | what to | is fctx.data()
2688 # fctx.is | diffopts | what to | is fctx.data()
2689 # binary() | text nobinary git index | output? | outputted?
2689 # binary() | text nobinary git index | output? | outputted?
2690 # ------------------------------------|----------------------------
2690 # ------------------------------------|----------------------------
2691 # yes | no no no * | summary | no
2691 # yes | no no no * | summary | no
2692 # yes | no no yes * | base85 | yes
2692 # yes | no no yes * | base85 | yes
2693 # yes | no yes no * | summary | no
2693 # yes | no yes no * | summary | no
2694 # yes | no yes yes 0 | summary | no
2694 # yes | no yes yes 0 | summary | no
2695 # yes | no yes yes >0 | summary | semi [1]
2695 # yes | no yes yes >0 | summary | semi [1]
2696 # yes | yes * * * | text diff | yes
2696 # yes | yes * * * | text diff | yes
2697 # no | * * * * | text diff | yes
2697 # no | * * * * | text diff | yes
2698 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2698 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2699 if binary and (not opts.git or (opts.git and opts.nobinary and not
2699 if binary and (not opts.git or (opts.git and opts.nobinary and not
2700 opts.index)):
2700 opts.index)):
2701 # fast path: no binary content will be displayed, content1 and
2701 # fast path: no binary content will be displayed, content1 and
2702 # content2 are only used for equivalent test. cmp() could have a
2702 # content2 are only used for equivalent test. cmp() could have a
2703 # fast path.
2703 # fast path.
2704 if fctx1 is not None:
2704 if fctx1 is not None:
2705 content1 = b'\0'
2705 content1 = b'\0'
2706 if fctx2 is not None:
2706 if fctx2 is not None:
2707 if fctx1 is not None and not fctx1.cmp(fctx2):
2707 if fctx1 is not None and not fctx1.cmp(fctx2):
2708 content2 = b'\0' # not different
2708 content2 = b'\0' # not different
2709 else:
2709 else:
2710 content2 = b'\0\0'
2710 content2 = b'\0\0'
2711 else:
2711 else:
2712 # normal path: load contents
2712 # normal path: load contents
2713 if fctx1 is not None:
2713 if fctx1 is not None:
2714 content1 = fctx1.data()
2714 content1 = fctx1.data()
2715 if fctx2 is not None:
2715 if fctx2 is not None:
2716 content2 = fctx2.data()
2716 content2 = fctx2.data()
2717
2717
2718 if binary and opts.git and not opts.nobinary:
2718 if binary and opts.git and not opts.nobinary:
2719 text = mdiff.b85diff(content1, content2)
2719 text = mdiff.b85diff(content1, content2)
2720 if text:
2720 if text:
2721 header.append('index %s..%s' %
2721 header.append('index %s..%s' %
2722 (gitindex(content1), gitindex(content2)))
2722 (gitindex(content1), gitindex(content2)))
2723 hunks = (None, [text]),
2723 hunks = (None, [text]),
2724 else:
2724 else:
2725 if opts.git and opts.index > 0:
2725 if opts.git and opts.index > 0:
2726 flag = flag1
2726 flag = flag1
2727 if flag is None:
2727 if flag is None:
2728 flag = flag2
2728 flag = flag2
2729 header.append('index %s..%s %s' %
2729 header.append('index %s..%s %s' %
2730 (gitindex(content1)[0:opts.index],
2730 (gitindex(content1)[0:opts.index],
2731 gitindex(content2)[0:opts.index],
2731 gitindex(content2)[0:opts.index],
2732 gitmode[flag]))
2732 gitmode[flag]))
2733
2733
2734 uheaders, hunks = mdiff.unidiff(content1, date1,
2734 uheaders, hunks = mdiff.unidiff(content1, date1,
2735 content2, date2,
2735 content2, date2,
2736 path1, path2,
2736 path1, path2,
2737 binary=binary, opts=opts)
2737 binary=binary, opts=opts)
2738 header.extend(uheaders)
2738 header.extend(uheaders)
2739 yield fctx1, fctx2, header, hunks
2739 yield fctx1, fctx2, header, hunks
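The gitindex() helper inside trydiff() computes a git blob id: the SHA-1 of b"blob <len>\0" followed by the content. A quick standalone check of that convention (the well-known empty-blob hash makes it easy to verify):

import hashlib

def blob_id(data):
    # same hash gitindex() returns, just via hexdigest()
    return hashlib.sha1(b'blob %d\0' % len(data) + data).hexdigest()

assert blob_id(b'') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'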
2740
2740
2741 def diffstatsum(stats):
2741 def diffstatsum(stats):
2742 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2742 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2743 for f, a, r, b in stats:
2743 for f, a, r, b in stats:
2744 maxfile = max(maxfile, encoding.colwidth(f))
2744 maxfile = max(maxfile, encoding.colwidth(f))
2745 maxtotal = max(maxtotal, a + r)
2745 maxtotal = max(maxtotal, a + r)
2746 addtotal += a
2746 addtotal += a
2747 removetotal += r
2747 removetotal += r
2748 binary = binary or b
2748 binary = binary or b
2749
2749
2750 return maxfile, maxtotal, addtotal, removetotal, binary
2750 return maxfile, maxtotal, addtotal, removetotal, binary
2751
2751
2752 def diffstatdata(lines):
2752 def diffstatdata(lines):
2753 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2753 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2754
2754
2755 results = []
2755 results = []
2756 filename, adds, removes, isbinary = None, 0, 0, False
2756 filename, adds, removes, isbinary = None, 0, 0, False
2757
2757
2758 def addresult():
2758 def addresult():
2759 if filename:
2759 if filename:
2760 results.append((filename, adds, removes, isbinary))
2760 results.append((filename, adds, removes, isbinary))
2761
2761
2762 # inheader is used to track if a line is in the
2762 # inheader is used to track if a line is in the
2763 # header portion of the diff. This helps properly account
2763 # header portion of the diff. This helps properly account
2764 # for lines that start with '--' or '++'
2764 # for lines that start with '--' or '++'
2765 inheader = False
2765 inheader = False
2766
2766
2767 for line in lines:
2767 for line in lines:
2768 if line.startswith('diff'):
2768 if line.startswith('diff'):
2769 addresult()
2769 addresult()
2770 # starting a new file diff
2770 # starting a new file diff
2771 # set numbers to 0 and reset inheader
2771 # set numbers to 0 and reset inheader
2772 inheader = True
2772 inheader = True
2773 adds, removes, isbinary = 0, 0, False
2773 adds, removes, isbinary = 0, 0, False
2774 if line.startswith('diff --git a/'):
2774 if line.startswith('diff --git a/'):
2775 filename = gitre.search(line).group(2)
2775 filename = gitre.search(line).group(2)
2776 elif line.startswith('diff -r'):
2776 elif line.startswith('diff -r'):
2777 # format: "diff -r ... -r ... filename"
2777 # format: "diff -r ... -r ... filename"
2778 filename = diffre.search(line).group(1)
2778 filename = diffre.search(line).group(1)
2779 elif line.startswith('@@'):
2779 elif line.startswith('@@'):
2780 inheader = False
2780 inheader = False
2781 elif line.startswith('+') and not inheader:
2781 elif line.startswith('+') and not inheader:
2782 adds += 1
2782 adds += 1
2783 elif line.startswith('-') and not inheader:
2783 elif line.startswith('-') and not inheader:
2784 removes += 1
2784 removes += 1
2785 elif (line.startswith('GIT binary patch') or
2785 elif (line.startswith('GIT binary patch') or
2786 line.startswith('Binary file')):
2786 line.startswith('Binary file')):
2787 isbinary = True
2787 isbinary = True
2788 elif line.startswith('rename from'):
2788 elif line.startswith('rename from'):
2789 filename = line[12:]
2789 filename = line[12:]
2790 elif line.startswith('rename to'):
2790 elif line.startswith('rename to'):
2791 filename += ' => %s' % line[10:]
2791 filename += ' => %s' % line[10:]
2792 addresult()
2792 addresult()
2793 return results
2793 return results
2794
2794
2795 def diffstat(lines, width=80):
2795 def diffstat(lines, width=80):
2796 output = []
2796 output = []
2797 stats = diffstatdata(lines)
2797 stats = diffstatdata(lines)
2798 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2798 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2799
2799
2800 countwidth = len(str(maxtotal))
2800 countwidth = len(str(maxtotal))
2801 if hasbinary and countwidth < 3:
2801 if hasbinary and countwidth < 3:
2802 countwidth = 3
2802 countwidth = 3
2803 graphwidth = width - countwidth - maxname - 6
2803 graphwidth = width - countwidth - maxname - 6
2804 if graphwidth < 10:
2804 if graphwidth < 10:
2805 graphwidth = 10
2805 graphwidth = 10
2806
2806
2807 def scale(i):
2807 def scale(i):
2808 if maxtotal <= graphwidth:
2808 if maxtotal <= graphwidth:
2809 return i
2809 return i
2810 # If diffstat runs out of room it doesn't print anything,
2810 # If diffstat runs out of room it doesn't print anything,
2811 # which isn't very useful, so always print at least one + or -
2811 # which isn't very useful, so always print at least one + or -
2812 # if there were at least some changes.
2812 # if there were at least some changes.
2813 return max(i * graphwidth // maxtotal, int(bool(i)))
2813 return max(i * graphwidth // maxtotal, int(bool(i)))
2814
2814
2815 for filename, adds, removes, isbinary in stats:
2815 for filename, adds, removes, isbinary in stats:
2816 if isbinary:
2816 if isbinary:
2817 count = 'Bin'
2817 count = 'Bin'
2818 else:
2818 else:
2819 count = '%d' % (adds + removes)
2819 count = '%d' % (adds + removes)
2820 pluses = '+' * scale(adds)
2820 pluses = '+' * scale(adds)
2821 minuses = '-' * scale(removes)
2821 minuses = '-' * scale(removes)
2822 output.append(' %s%s | %*s %s%s\n' %
2822 output.append(' %s%s | %*s %s%s\n' %
2823 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2823 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2824 countwidth, count, pluses, minuses))
2824 countwidth, count, pluses, minuses))
2825
2825
2826 if stats:
2826 if stats:
2827 output.append(_(' %d files changed, %d insertions(+), '
2827 output.append(_(' %d files changed, %d insertions(+), '
2828 '%d deletions(-)\n')
2828 '%d deletions(-)\n')
2829 % (len(stats), totaladds, totalremoves))
2829 % (len(stats), totaladds, totalremoves))
2830
2830
2831 return ''.join(output)
2831 return ''.join(output)
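A hedged sketch of the diffstat pipeline above on a tiny hand-written git diff (the sample text is an assumption): diffstatdata() parses per-file add/remove counts, diffstatsum() aggregates them, and diffstat() renders the familiar histogram.

sample = b'''diff --git a/foo.txt b/foo.txt
--- a/foo.txt
+++ b/foo.txt
@@ -1,2 +1,2 @@
-old line
+new line
 context
'''
stats = diffstatdata(sample.splitlines(True))
# -> [(b'foo.txt', 1, 1, False)]   (filename, adds, removes, isbinary)
maxname, maxtotal, adds, removes, hasbinary = diffstatsum(stats)
table = diffstat(sample.splitlines(True))   # " foo.txt |  2 +-" style text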
2832
2832
2833 def diffstatui(*args, **kw):
2833 def diffstatui(*args, **kw):
2834 '''like diffstat(), but yields 2-tuples of (output, label) for
2834 '''like diffstat(), but yields 2-tuples of (output, label) for
2835 ui.write()
2835 ui.write()
2836 '''
2836 '''
2837
2837
2838 for line in diffstat(*args, **kw).splitlines():
2838 for line in diffstat(*args, **kw).splitlines():
2839 if line and line[-1] in '+-':
2839 if line and line[-1] in '+-':
2840 name, graph = line.rsplit(' ', 1)
2840 name, graph = line.rsplit(' ', 1)
2841 yield (name + ' ', '')
2841 yield (name + ' ', '')
2842 m = re.search(br'\++', graph)
2842 m = re.search(br'\++', graph)
2843 if m:
2843 if m:
2844 yield (m.group(0), 'diffstat.inserted')
2844 yield (m.group(0), 'diffstat.inserted')
2845 m = re.search(br'-+', graph)
2845 m = re.search(br'-+', graph)
2846 if m:
2846 if m:
2847 yield (m.group(0), 'diffstat.deleted')
2847 yield (m.group(0), 'diffstat.deleted')
2848 else:
2848 else:
2849 yield (line, '')
2849 yield (line, '')
2850 yield ('\n', '')
2850 yield ('\n', '')
@@ -1,766 +1,769 b''
1 #testcases obsstore-on obsstore-off
1 #testcases obsstore-on obsstore-off
2
2
3 $ cat > $TESTTMP/editor.py <<EOF
3 $ cat > $TESTTMP/editor.py <<EOF
4 > #!"$PYTHON"
4 > #!"$PYTHON"
5 > import os
5 > import os
6 > import sys
6 > import sys
7 > path = os.path.join(os.environ['TESTTMP'], 'messages')
7 > path = os.path.join(os.environ['TESTTMP'], 'messages')
8 > messages = open(path).read().split('--\n')
8 > messages = open(path).read().split('--\n')
9 > prompt = open(sys.argv[1]).read()
9 > prompt = open(sys.argv[1]).read()
10 > sys.stdout.write(''.join('EDITOR: %s' % l for l in prompt.splitlines(True)))
10 > sys.stdout.write(''.join('EDITOR: %s' % l for l in prompt.splitlines(True)))
11 > sys.stdout.flush()
11 > sys.stdout.flush()
12 > with open(sys.argv[1], 'w') as f:
12 > with open(sys.argv[1], 'w') as f:
13 > f.write(messages[0])
13 > f.write(messages[0])
14 > with open(path, 'w') as f:
14 > with open(path, 'w') as f:
15 > f.write('--\n'.join(messages[1:]))
15 > f.write('--\n'.join(messages[1:]))
16 > EOF
16 > EOF
17
17
18 $ cat >> $HGRCPATH <<EOF
18 $ cat >> $HGRCPATH <<EOF
19 > [extensions]
19 > [extensions]
20 > drawdag=$TESTDIR/drawdag.py
20 > drawdag=$TESTDIR/drawdag.py
21 > split=
21 > split=
22 > [ui]
22 > [ui]
23 > interactive=1
23 > interactive=1
24 > color=no
24 > color=no
25 > paginate=never
25 > paginate=never
26 > [diff]
26 > [diff]
27 > git=1
27 > git=1
28 > unified=0
28 > unified=0
29 > [commands]
29 > [commands]
30 > commit.interactive.unified=0
30 > commit.interactive.unified=0
31 > [alias]
31 > [alias]
32 > glog=log -G -T '{rev}:{node|short} {desc} {bookmarks}\n'
32 > glog=log -G -T '{rev}:{node|short} {desc} {bookmarks}\n'
33 > EOF
33 > EOF
34
34
35 #if obsstore-on
35 #if obsstore-on
36 $ cat >> $HGRCPATH <<EOF
36 $ cat >> $HGRCPATH <<EOF
37 > [experimental]
37 > [experimental]
38 > evolution=all
38 > evolution=all
39 > EOF
39 > EOF
40 #endif
40 #endif
41
41
42 $ hg init a
42 $ hg init a
43 $ cd a
43 $ cd a
44
44
45 Nothing to split
45 Nothing to split
46
46
47 $ hg split
47 $ hg split
48 nothing to split
48 nothing to split
49 [1]
49 [1]
50
50
51 $ hg commit -m empty --config ui.allowemptycommit=1
51 $ hg commit -m empty --config ui.allowemptycommit=1
52 $ hg split
52 $ hg split
53 abort: cannot split an empty revision
53 abort: cannot split an empty revision
54 [255]
54 [255]
55
55
56 $ rm -rf .hg
56 $ rm -rf .hg
57 $ hg init
57 $ hg init
58
58
59 Cannot split working directory
59 Cannot split working directory
60
60
61 $ hg split -r 'wdir()'
61 $ hg split -r 'wdir()'
62 abort: cannot split working directory
62 abort: cannot split working directory
63 [255]
63 [255]
64
64
65 Generate some content. The sed filter drops CR on Windows, which is dropped in
65 Generate some content. The sed filter drops CR on Windows, which is dropped in
66 the a > b line.
66 the a > b line.
67
67
68 $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
68 $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
69 $ hg ci -m a1 -A a -q
69 $ hg ci -m a1 -A a -q
70 $ hg bookmark -i r1
70 $ hg bookmark -i r1
71 $ sed 's/1/11/;s/3/33/;s/5/55/' a > b
71 $ sed 's/1/11/;s/3/33/;s/5/55/' a > b
72 $ mv b a
72 $ mv b a
73 $ hg ci -m a2 -q
73 $ hg ci -m a2 -q
74 $ hg bookmark -i r2
74 $ hg bookmark -i r2
75
75
76 Cannot split a public changeset
76 Cannot split a public changeset
77
77
78 $ hg phase --public -r 'all()'
78 $ hg phase --public -r 'all()'
79 $ hg split .
79 $ hg split .
80 abort: cannot split public changeset
80 abort: cannot split public changeset
81 (see 'hg help phases' for details)
81 (see 'hg help phases' for details)
82 [255]
82 [255]
83
83
84 $ hg phase --draft -f -r 'all()'
84 $ hg phase --draft -f -r 'all()'
85
85
86 Cannot split while working directory is dirty
86 Cannot split while working directory is dirty
87
87
88 $ touch dirty
88 $ touch dirty
89 $ hg add dirty
89 $ hg add dirty
90 $ hg split .
90 $ hg split .
91 abort: uncommitted changes
91 abort: uncommitted changes
92 [255]
92 [255]
93 $ hg forget dirty
93 $ hg forget dirty
94 $ rm dirty
94 $ rm dirty
95
95
96 Make a clean directory for future tests to build off of
96 Make a clean directory for future tests to build off of
97
97
98 $ cp -R . ../clean
98 $ cp -R . ../clean
99
99
100 Split a head
100 Split a head
101
101
102 $ hg bookmark r3
102 $ hg bookmark r3
103
103
104 $ hg split 'all()'
104 $ hg split 'all()'
105 abort: cannot split multiple revisions
105 abort: cannot split multiple revisions
106 [255]
106 [255]
107
107
108 This function splits a bit strangely primarily to avoid changing the behavior of
108 This function splits a bit strangely primarily to avoid changing the behavior of
109 the test after a bug was fixed with how split/commit --interactive handled
109 the test after a bug was fixed with how split/commit --interactive handled
110 `commands.commit.interactive.unified=0`: when there were no context lines,
110 `commands.commit.interactive.unified=0`: when there were no context lines,
111 it kept only the last diff hunk. When running split, this meant that runsplit
111 it kept only the last diff hunk. When running split, this meant that runsplit
112 was always recording three commits, one for each diff hunk, in reverse order
112 was always recording three commits, one for each diff hunk, in reverse order
113 (the base commit was the last diff hunk in the file).
113 (the base commit was the last diff hunk in the file).
114 $ runsplit() {
114 $ runsplit() {
115 > cat > $TESTTMP/messages <<EOF
115 > cat > $TESTTMP/messages <<EOF
116 > split 1
116 > split 1
117 > --
117 > --
118 > split 2
118 > split 2
119 > --
119 > --
120 > split 3
120 > split 3
121 > EOF
121 > EOF
122 > cat <<EOF | hg split "$@"
122 > cat <<EOF | hg split "$@"
123 > y
123 > y
124 > n
124 > n
125 > n
125 > n
126 > y
126 > y
127 > y
127 > y
128 > n
128 > n
129 > y
129 > y
130 > y
130 > y
131 > y
131 > y
132 > EOF
132 > EOF
133 > }
133 > }
134
134
135 $ HGEDITOR=false runsplit
135 $ HGEDITOR=false runsplit
136 diff --git a/a b/a
136 diff --git a/a b/a
137 3 hunks, 3 lines changed
137 3 hunks, 3 lines changed
138 examine changes to 'a'? [Ynesfdaq?] y
138 examine changes to 'a'? [Ynesfdaq?] y
139
139
140 @@ -1,1 +1,1 @@
140 @@ -1,1 +1,1 @@
141 -1
141 -1
142 +11
142 +11
143 record change 1/3 to 'a'? [Ynesfdaq?] n
143 record change 1/3 to 'a'? [Ynesfdaq?] n
144
144
145 @@ -3,1 +3,1 @@ 2
145 @@ -3,1 +3,1 @@ 2
146 -3
146 -3
147 +33
147 +33
148 record change 2/3 to 'a'? [Ynesfdaq?] n
148 record change 2/3 to 'a'? [Ynesfdaq?] n
149
149
150 @@ -5,1 +5,1 @@ 4
150 @@ -5,1 +5,1 @@ 4
151 -5
151 -5
152 +55
152 +55
153 record change 3/3 to 'a'? [Ynesfdaq?] y
153 record change 3/3 to 'a'? [Ynesfdaq?] y
154
154
155 transaction abort!
155 transaction abort!
156 rollback completed
156 rollback completed
157 abort: edit failed: false exited with status 1
157 abort: edit failed: false exited with status 1
158 [255]
158 [255]
159 $ hg status
159 $ hg status
160
160
161 $ HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py"
161 $ HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py"
162 $ runsplit
162 $ runsplit
163 diff --git a/a b/a
163 diff --git a/a b/a
164 3 hunks, 3 lines changed
164 3 hunks, 3 lines changed
165 examine changes to 'a'? [Ynesfdaq?] y
165 examine changes to 'a'? [Ynesfdaq?] y
166
166
167 @@ -1,1 +1,1 @@
167 @@ -1,1 +1,1 @@
168 -1
168 -1
169 +11
169 +11
170 record change 1/3 to 'a'? [Ynesfdaq?] n
170 record change 1/3 to 'a'? [Ynesfdaq?] n
171
171
172 @@ -3,1 +3,1 @@ 2
172 @@ -3,1 +3,1 @@ 2
173 -3
173 -3
174 +33
174 +33
175 record change 2/3 to 'a'? [Ynesfdaq?] n
175 record change 2/3 to 'a'? [Ynesfdaq?] n
176
176
177 @@ -5,1 +5,1 @@ 4
177 @@ -5,1 +5,1 @@ 4
178 -5
178 -5
179 +55
179 +55
180 record change 3/3 to 'a'? [Ynesfdaq?] y
180 record change 3/3 to 'a'? [Ynesfdaq?] y
181
181
182 EDITOR: HG: Splitting 1df0d5c5a3ab. Write commit message for the first split changeset.
182 EDITOR: HG: Splitting 1df0d5c5a3ab. Write commit message for the first split changeset.
183 EDITOR: a2
183 EDITOR: a2
184 EDITOR:
184 EDITOR:
185 EDITOR:
185 EDITOR:
186 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
186 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
187 EDITOR: HG: Leave message empty to abort commit.
187 EDITOR: HG: Leave message empty to abort commit.
188 EDITOR: HG: --
188 EDITOR: HG: --
189 EDITOR: HG: user: test
189 EDITOR: HG: user: test
190 EDITOR: HG: branch 'default'
190 EDITOR: HG: branch 'default'
191 EDITOR: HG: changed a
191 EDITOR: HG: changed a
192 created new head
192 created new head
193 diff --git a/a b/a
193 diff --git a/a b/a
194 2 hunks, 2 lines changed
194 2 hunks, 2 lines changed
195 examine changes to 'a'? [Ynesfdaq?] y
195 examine changes to 'a'? [Ynesfdaq?] y
196
196
197 @@ -1,1 +1,1 @@
197 @@ -1,1 +1,1 @@
198 -1
198 -1
199 +11
199 +11
200 record change 1/2 to 'a'? [Ynesfdaq?] n
200 record change 1/2 to 'a'? [Ynesfdaq?] n
201
201
202 @@ -3,1 +3,1 @@ 2
202 @@ -3,1 +3,1 @@ 2
203 -3
203 -3
204 +33
204 +33
205 record change 2/2 to 'a'? [Ynesfdaq?] y
205 record change 2/2 to 'a'? [Ynesfdaq?] y
206
206
207 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
207 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
208 EDITOR: HG: - e704349bd21b: split 1
208 EDITOR: HG: - e704349bd21b: split 1
209 EDITOR: HG: Write commit message for the next split changeset.
209 EDITOR: HG: Write commit message for the next split changeset.
210 EDITOR: a2
210 EDITOR: a2
211 EDITOR:
211 EDITOR:
212 EDITOR:
212 EDITOR:
213 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
213 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
214 EDITOR: HG: Leave message empty to abort commit.
214 EDITOR: HG: Leave message empty to abort commit.
215 EDITOR: HG: --
215 EDITOR: HG: --
216 EDITOR: HG: user: test
216 EDITOR: HG: user: test
217 EDITOR: HG: branch 'default'
217 EDITOR: HG: branch 'default'
218 EDITOR: HG: changed a
218 EDITOR: HG: changed a
219 diff --git a/a b/a
219 diff --git a/a b/a
220 1 hunks, 1 lines changed
220 1 hunks, 1 lines changed
221 examine changes to 'a'? [Ynesfdaq?] y
221 examine changes to 'a'? [Ynesfdaq?] y
222
222
223 @@ -1,1 +1,1 @@
223 @@ -1,1 +1,1 @@
224 -1
224 -1
225 +11
225 +11
226 record this change to 'a'? [Ynesfdaq?] y
226 record this change to 'a'? [Ynesfdaq?] y
227
227
228 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
228 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
229 EDITOR: HG: - e704349bd21b: split 1
229 EDITOR: HG: - e704349bd21b: split 1
230 EDITOR: HG: - a09ad58faae3: split 2
230 EDITOR: HG: - a09ad58faae3: split 2
231 EDITOR: HG: Write commit message for the next split changeset.
231 EDITOR: HG: Write commit message for the next split changeset.
232 EDITOR: a2
232 EDITOR: a2
233 EDITOR:
233 EDITOR:
234 EDITOR:
234 EDITOR:
235 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
235 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
236 EDITOR: HG: Leave message empty to abort commit.
236 EDITOR: HG: Leave message empty to abort commit.
237 EDITOR: HG: --
237 EDITOR: HG: --
238 EDITOR: HG: user: test
238 EDITOR: HG: user: test
239 EDITOR: HG: branch 'default'
239 EDITOR: HG: branch 'default'
240 EDITOR: HG: changed a
240 EDITOR: HG: changed a
241 saved backup bundle to $TESTTMP/a/.hg/strip-backup/1df0d5c5a3ab-8341b760-split.hg (obsstore-off !)
241 saved backup bundle to $TESTTMP/a/.hg/strip-backup/1df0d5c5a3ab-8341b760-split.hg (obsstore-off !)
242
242
243 #if obsstore-off
243 #if obsstore-off
244 $ hg bookmark
244 $ hg bookmark
245 r1 0:a61bcde8c529
245 r1 0:a61bcde8c529
246 r2 3:00eebaf8d2e2
246 r2 3:00eebaf8d2e2
247 * r3 3:00eebaf8d2e2
247 * r3 3:00eebaf8d2e2
248 $ hg glog -p
248 $ hg glog -p
249 @ 3:00eebaf8d2e2 split 3 r2 r3
249 @ 3:00eebaf8d2e2 split 3 r2 r3
250 | diff --git a/a b/a
250 | diff --git a/a b/a
251 | --- a/a
251 | --- a/a
252 | +++ b/a
252 | +++ b/a
253 | @@ -1,1 +1,1 @@
253 | @@ -1,1 +1,1 @@
254 | -1
254 | -1
255 | +11
255 | +11
256 |
256 |
257 o 2:a09ad58faae3 split 2
257 o 2:a09ad58faae3 split 2
258 | diff --git a/a b/a
258 | diff --git a/a b/a
259 | --- a/a
259 | --- a/a
260 | +++ b/a
260 | +++ b/a
261 | @@ -3,1 +3,1 @@
261 | @@ -3,1 +3,1 @@
262 | -3
262 | -3
263 | +33
263 | +33
264 |
264 |
265 o 1:e704349bd21b split 1
265 o 1:e704349bd21b split 1
266 | diff --git a/a b/a
266 | diff --git a/a b/a
267 | --- a/a
267 | --- a/a
268 | +++ b/a
268 | +++ b/a
269 | @@ -5,1 +5,1 @@
269 | @@ -5,1 +5,1 @@
270 | -5
270 | -5
271 | +55
271 | +55
272 |
272 |
273 o 0:a61bcde8c529 a1 r1
273 o 0:a61bcde8c529 a1 r1
274 diff --git a/a b/a
274 diff --git a/a b/a
275 new file mode 100644
275 new file mode 100644
276 --- /dev/null
276 --- /dev/null
277 +++ b/a
277 +++ b/a
278 @@ -0,0 +1,5 @@
278 @@ -0,0 +1,5 @@
279 +1
279 +1
280 +2
280 +2
281 +3
281 +3
282 +4
282 +4
283 +5
283 +5
284
284
285 #else
285 #else
286 $ hg bookmark
286 $ hg bookmark
287 r1 0:a61bcde8c529
287 r1 0:a61bcde8c529
288 r2 4:00eebaf8d2e2
288 r2 4:00eebaf8d2e2
289 * r3 4:00eebaf8d2e2
289 * r3 4:00eebaf8d2e2
290 $ hg glog
290 $ hg glog
291 @ 4:00eebaf8d2e2 split 3 r2 r3
291 @ 4:00eebaf8d2e2 split 3 r2 r3
292 |
292 |
293 o 3:a09ad58faae3 split 2
293 o 3:a09ad58faae3 split 2
294 |
294 |
295 o 2:e704349bd21b split 1
295 o 2:e704349bd21b split 1
296 |
296 |
297 o 0:a61bcde8c529 a1 r1
297 o 0:a61bcde8c529 a1 r1
298
298
299 #endif
299 #endif
300
300
301 Split a head while working parent is not that head
301 Split a head while working parent is not that head
302
302
303 $ cp -R $TESTTMP/clean $TESTTMP/b
303 $ cp -R $TESTTMP/clean $TESTTMP/b
304 $ cd $TESTTMP/b
304 $ cd $TESTTMP/b
305
305
306 $ hg up 0 -q
306 $ hg up 0 -q
307 $ hg bookmark r3
307 $ hg bookmark r3
308
308
309 $ runsplit tip >/dev/null
309 $ runsplit tip >/dev/null
310
310
311 #if obsstore-off
311 #if obsstore-off
312 $ hg bookmark
312 $ hg bookmark
313 r1 0:a61bcde8c529
313 r1 0:a61bcde8c529
314 r2 3:00eebaf8d2e2
314 r2 3:00eebaf8d2e2
315 * r3 0:a61bcde8c529
315 * r3 0:a61bcde8c529
316 $ hg glog
316 $ hg glog
317 o 3:00eebaf8d2e2 split 3 r2
317 o 3:00eebaf8d2e2 split 3 r2
318 |
318 |
319 o 2:a09ad58faae3 split 2
319 o 2:a09ad58faae3 split 2
320 |
320 |
321 o 1:e704349bd21b split 1
321 o 1:e704349bd21b split 1
322 |
322 |
323 @ 0:a61bcde8c529 a1 r1 r3
323 @ 0:a61bcde8c529 a1 r1 r3
324
324
325 #else
325 #else
326 $ hg bookmark
326 $ hg bookmark
327 r1 0:a61bcde8c529
327 r1 0:a61bcde8c529
328 r2 4:00eebaf8d2e2
328 r2 4:00eebaf8d2e2
329 * r3 0:a61bcde8c529
329 * r3 0:a61bcde8c529
330 $ hg glog
330 $ hg glog
331 o 4:00eebaf8d2e2 split 3 r2
331 o 4:00eebaf8d2e2 split 3 r2
332 |
332 |
333 o 3:a09ad58faae3 split 2
333 o 3:a09ad58faae3 split 2
334 |
334 |
335 o 2:e704349bd21b split 1
335 o 2:e704349bd21b split 1
336 |
336 |
337 @ 0:a61bcde8c529 a1 r1 r3
337 @ 0:a61bcde8c529 a1 r1 r3
338
338
339 #endif
339 #endif
340
340
341 Split a non-head
341 Split a non-head
342
342
343 $ cp -R $TESTTMP/clean $TESTTMP/c
343 $ cp -R $TESTTMP/clean $TESTTMP/c
344 $ cd $TESTTMP/c
344 $ cd $TESTTMP/c
345 $ echo d > d
345 $ echo d > d
346 $ hg ci -m d1 -A d
346 $ hg ci -m d1 -A d
347 $ hg bookmark -i d1
347 $ hg bookmark -i d1
348 $ echo 2 >> d
348 $ echo 2 >> d
349 $ hg ci -m d2
349 $ hg ci -m d2
350 $ echo 3 >> d
350 $ echo 3 >> d
351 $ hg ci -m d3
351 $ hg ci -m d3
352 $ hg bookmark -i d3
352 $ hg bookmark -i d3
353 $ hg up '.^' -q
353 $ hg up '.^' -q
354 $ hg bookmark d2
354 $ hg bookmark d2
355 $ cp -R . ../d
355 $ cp -R . ../d
356
356
357 $ runsplit -r 1 | grep rebasing
357 $ runsplit -r 1 | grep rebasing
358 rebasing 2:b5c5ea414030 "d1" (d1)
358 rebasing 2:b5c5ea414030 "d1" (d1)
359 rebasing 3:f4a0a8d004cc "d2" (d2)
359 rebasing 3:f4a0a8d004cc "d2" (d2)
360 rebasing 4:777940761eba "d3" (d3)
360 rebasing 4:777940761eba "d3" (d3)
361 #if obsstore-off
361 #if obsstore-off
362 $ hg bookmark
362 $ hg bookmark
363 d1 4:c4b449ef030e
363 d1 4:c4b449ef030e
364 * d2 5:c9dd00ab36a3
364 * d2 5:c9dd00ab36a3
365 d3 6:19f476bc865c
365 d3 6:19f476bc865c
366 r1 0:a61bcde8c529
366 r1 0:a61bcde8c529
367 r2 3:00eebaf8d2e2
367 r2 3:00eebaf8d2e2
368 $ hg glog -p
368 $ hg glog -p
369 o 6:19f476bc865c d3 d3
369 o 6:19f476bc865c d3 d3
370 | diff --git a/d b/d
370 | diff --git a/d b/d
371 | --- a/d
371 | --- a/d
372 | +++ b/d
372 | +++ b/d
373 | @@ -2,0 +3,1 @@
373 | @@ -2,0 +3,1 @@
374 | +3
374 | +3
375 |
375 |
376 @ 5:c9dd00ab36a3 d2 d2
376 @ 5:c9dd00ab36a3 d2 d2
377 | diff --git a/d b/d
377 | diff --git a/d b/d
378 | --- a/d
378 | --- a/d
379 | +++ b/d
379 | +++ b/d
380 | @@ -1,0 +2,1 @@
380 | @@ -1,0 +2,1 @@
381 | +2
381 | +2
382 |
382 |
383 o 4:c4b449ef030e d1 d1
383 o 4:c4b449ef030e d1 d1
384 | diff --git a/d b/d
384 | diff --git a/d b/d
385 | new file mode 100644
385 | new file mode 100644
386 | --- /dev/null
386 | --- /dev/null
387 | +++ b/d
387 | +++ b/d
388 | @@ -0,0 +1,1 @@
388 | @@ -0,0 +1,1 @@
389 | +d
389 | +d
390 |
390 |
391 o 3:00eebaf8d2e2 split 3 r2
391 o 3:00eebaf8d2e2 split 3 r2
392 | diff --git a/a b/a
392 | diff --git a/a b/a
393 | --- a/a
393 | --- a/a
394 | +++ b/a
394 | +++ b/a
395 | @@ -1,1 +1,1 @@
395 | @@ -1,1 +1,1 @@
396 | -1
396 | -1
397 | +11
397 | +11
398 |
398 |
399 o 2:a09ad58faae3 split 2
399 o 2:a09ad58faae3 split 2
400 | diff --git a/a b/a
400 | diff --git a/a b/a
401 | --- a/a
401 | --- a/a
402 | +++ b/a
402 | +++ b/a
403 | @@ -3,1 +3,1 @@
403 | @@ -3,1 +3,1 @@
404 | -3
404 | -3
405 | +33
405 | +33
406 |
406 |
407 o 1:e704349bd21b split 1
407 o 1:e704349bd21b split 1
408 | diff --git a/a b/a
408 | diff --git a/a b/a
409 | --- a/a
409 | --- a/a
410 | +++ b/a
410 | +++ b/a
411 | @@ -5,1 +5,1 @@
411 | @@ -5,1 +5,1 @@
412 | -5
412 | -5
413 | +55
413 | +55
414 |
414 |
415 o 0:a61bcde8c529 a1 r1
415 o 0:a61bcde8c529 a1 r1
416 diff --git a/a b/a
416 diff --git a/a b/a
417 new file mode 100644
417 new file mode 100644
418 --- /dev/null
418 --- /dev/null
419 +++ b/a
419 +++ b/a
420 @@ -0,0 +1,5 @@
420 @@ -0,0 +1,5 @@
421 +1
421 +1
422 +2
422 +2
423 +3
423 +3
424 +4
424 +4
425 +5
425 +5
426
426
427 #else
427 #else
428 $ hg bookmark
428 $ hg bookmark
429 d1 8:c4b449ef030e
429 d1 8:c4b449ef030e
430 * d2 9:c9dd00ab36a3
430 * d2 9:c9dd00ab36a3
431 d3 10:19f476bc865c
431 d3 10:19f476bc865c
432 r1 0:a61bcde8c529
432 r1 0:a61bcde8c529
433 r2 7:00eebaf8d2e2
433 r2 7:00eebaf8d2e2
434 $ hg glog
434 $ hg glog
435 o 10:19f476bc865c d3 d3
435 o 10:19f476bc865c d3 d3
436 |
436 |
437 @ 9:c9dd00ab36a3 d2 d2
437 @ 9:c9dd00ab36a3 d2 d2
438 |
438 |
439 o 8:c4b449ef030e d1 d1
439 o 8:c4b449ef030e d1 d1
440 |
440 |
441 o 7:00eebaf8d2e2 split 3 r2
441 o 7:00eebaf8d2e2 split 3 r2
442 |
442 |
443 o 6:a09ad58faae3 split 2
443 o 6:a09ad58faae3 split 2
444 |
444 |
445 o 5:e704349bd21b split 1
445 o 5:e704349bd21b split 1
446 |
446 |
447 o 0:a61bcde8c529 a1 r1
447 o 0:a61bcde8c529 a1 r1
448
448
449 #endif
449 #endif
450
450
451 Split a non-head without rebase
451 Split a non-head without rebase
452
452
453 $ cd $TESTTMP/d
453 $ cd $TESTTMP/d
454 #if obsstore-off
454 #if obsstore-off
455 $ runsplit -r 1 --no-rebase
455 $ runsplit -r 1 --no-rebase
456 abort: cannot split changeset with children without rebase
456 abort: cannot split changeset with children without rebase
457 [255]
457 [255]
458 #else
458 #else
459 $ runsplit -r 1 --no-rebase >/dev/null
459 $ runsplit -r 1 --no-rebase >/dev/null
460 3 new orphan changesets
460 3 new orphan changesets
461 $ hg bookmark
461 $ hg bookmark
462 d1 2:b5c5ea414030
462 d1 2:b5c5ea414030
463 * d2 3:f4a0a8d004cc
463 * d2 3:f4a0a8d004cc
464 d3 4:777940761eba
464 d3 4:777940761eba
465 r1 0:a61bcde8c529
465 r1 0:a61bcde8c529
466 r2 7:00eebaf8d2e2
466 r2 7:00eebaf8d2e2
467
467
468 $ hg glog
468 $ hg glog
469 o 7:00eebaf8d2e2 split 3 r2
469 o 7:00eebaf8d2e2 split 3 r2
470 |
470 |
471 o 6:a09ad58faae3 split 2
471 o 6:a09ad58faae3 split 2
472 |
472 |
473 o 5:e704349bd21b split 1
473 o 5:e704349bd21b split 1
474 |
474 |
475 | * 4:777940761eba d3 d3
475 | * 4:777940761eba d3 d3
476 | |
476 | |
477 | @ 3:f4a0a8d004cc d2 d2
477 | @ 3:f4a0a8d004cc d2 d2
478 | |
478 | |
479 | * 2:b5c5ea414030 d1 d1
479 | * 2:b5c5ea414030 d1 d1
480 | |
480 | |
481 | x 1:1df0d5c5a3ab a2
481 | x 1:1df0d5c5a3ab a2
482 |/
482 |/
483 o 0:a61bcde8c529 a1 r1
483 o 0:a61bcde8c529 a1 r1
484
484
485 #endif
485 #endif
486
486
487 Split a non-head with obsoleted descendants
487 Split a non-head with obsoleted descendants
488
488
489 #if obsstore-on
489 #if obsstore-on
490 $ hg init $TESTTMP/e
490 $ hg init $TESTTMP/e
491 $ cd $TESTTMP/e
491 $ cd $TESTTMP/e
492 $ hg debugdrawdag <<'EOS'
492 $ hg debugdrawdag <<'EOS'
493 > H I J
493 > H I J
494 > | | |
494 > | | |
495 > F G1 G2 # amend: G1 -> G2
495 > F G1 G2 # amend: G1 -> G2
496 > | | / # prune: F
496 > | | / # prune: F
497 > C D E
497 > C D E
498 > \|/
498 > \|/
499 > B
499 > B
500 > |
500 > |
501 > A
501 > A
502 > EOS
502 > EOS
503 2 new orphan changesets
503 2 new orphan changesets
504 $ eval `hg tags -T '{tag}={node}\n'`
504 $ eval `hg tags -T '{tag}={node}\n'`
505 $ rm .hg/localtags
505 $ rm .hg/localtags
506 $ hg split $B --config experimental.evolution=createmarkers
506 $ hg split $B --config experimental.evolution=createmarkers
507 abort: split would leave orphaned changesets behind
507 abort: split would leave orphaned changesets behind
508 [255]
508 [255]
509 $ cat > $TESTTMP/messages <<EOF
509 $ cat > $TESTTMP/messages <<EOF
510 > Split B
510 > Split B
511 > EOF
511 > EOF
512 $ cat <<EOF | hg split $B
512 $ cat <<EOF | hg split $B
513 > y
513 > y
514 > y
514 > y
515 > EOF
515 > EOF
516 diff --git a/B b/B
516 diff --git a/B b/B
517 new file mode 100644
517 new file mode 100644
518 examine changes to 'B'? [Ynesfdaq?] y
518 examine changes to 'B'? [Ynesfdaq?] y
519
519
520 @@ -0,0 +1,1 @@
520 @@ -0,0 +1,1 @@
521 +B
521 +B
522 \ No newline at end of file
522 \ No newline at end of file
523 record this change to 'B'? [Ynesfdaq?] y
523 record this change to 'B'? [Ynesfdaq?] y
524
524
525 EDITOR: HG: Splitting 112478962961. Write commit message for the first split changeset.
525 EDITOR: HG: Splitting 112478962961. Write commit message for the first split changeset.
526 EDITOR: B
526 EDITOR: B
527 EDITOR:
527 EDITOR:
528 EDITOR:
528 EDITOR:
529 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
529 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
530 EDITOR: HG: Leave message empty to abort commit.
530 EDITOR: HG: Leave message empty to abort commit.
531 EDITOR: HG: --
531 EDITOR: HG: --
532 EDITOR: HG: user: test
532 EDITOR: HG: user: test
533 EDITOR: HG: branch 'default'
533 EDITOR: HG: branch 'default'
534 EDITOR: HG: added B
534 EDITOR: HG: added B
535 created new head
535 created new head
536 rebasing 2:26805aba1e60 "C"
536 rebasing 2:26805aba1e60 "C"
537 rebasing 3:be0ef73c17ad "D"
537 rebasing 3:be0ef73c17ad "D"
538 rebasing 4:49cb92066bfd "E"
538 rebasing 4:49cb92066bfd "E"
539 rebasing 7:97a6268cc7ef "G2"
539 rebasing 7:97a6268cc7ef "G2"
540 rebasing 10:e2f1e425c0db "J"
540 rebasing 10:e2f1e425c0db "J"
541 $ hg glog -r 'sort(all(), topo)'
541 $ hg glog -r 'sort(all(), topo)'
542 o 16:556c085f8b52 J
542 o 16:556c085f8b52 J
543 |
543 |
544 o 15:8761f6c9123f G2
544 o 15:8761f6c9123f G2
545 |
545 |
546 o 14:a7aeffe59b65 E
546 o 14:a7aeffe59b65 E
547 |
547 |
548 | o 13:e1e914ede9ab D
548 | o 13:e1e914ede9ab D
549 |/
549 |/
550 | o 12:01947e9b98aa C
550 | o 12:01947e9b98aa C
551 |/
551 |/
552 o 11:0947baa74d47 Split B
552 o 11:0947baa74d47 Split B
553 |
553 |
554 | * 9:88ede1d5ee13 I
554 | * 9:88ede1d5ee13 I
555 | |
555 | |
556 | x 6:af8cbf225b7b G1
556 | x 6:af8cbf225b7b G1
557 | |
557 | |
558 | x 3:be0ef73c17ad D
558 | x 3:be0ef73c17ad D
559 | |
559 | |
560 | | * 8:74863e5b5074 H
560 | | * 8:74863e5b5074 H
561 | | |
561 | | |
562 | | x 5:ee481a2a1e69 F
562 | | x 5:ee481a2a1e69 F
563 | | |
563 | | |
564 | | x 2:26805aba1e60 C
564 | | x 2:26805aba1e60 C
565 | |/
565 | |/
566 | x 1:112478962961 B
566 | x 1:112478962961 B
567 |/
567 |/
568 o 0:426bada5c675 A
568 o 0:426bada5c675 A
569
569
570 #endif
570 #endif
571
571
572 Preserve secret phase in split
572 Preserve secret phase in split
573
573
574 $ cp -R $TESTTMP/clean $TESTTMP/phases1
574 $ cp -R $TESTTMP/clean $TESTTMP/phases1
575 $ cd $TESTTMP/phases1
575 $ cd $TESTTMP/phases1
576 $ hg phase --secret -fr tip
576 $ hg phase --secret -fr tip
577 $ hg log -T '{short(node)} {phase}\n'
577 $ hg log -T '{short(node)} {phase}\n'
578 1df0d5c5a3ab secret
578 1df0d5c5a3ab secret
579 a61bcde8c529 draft
579 a61bcde8c529 draft
580 $ runsplit tip >/dev/null
580 $ runsplit tip >/dev/null
581 $ hg log -T '{short(node)} {phase}\n'
581 $ hg log -T '{short(node)} {phase}\n'
582 00eebaf8d2e2 secret
582 00eebaf8d2e2 secret
583 a09ad58faae3 secret
583 a09ad58faae3 secret
584 e704349bd21b secret
584 e704349bd21b secret
585 a61bcde8c529 draft
585 a61bcde8c529 draft
586
586
587 Do not move things to secret even if phases.new-commit=secret
587 Do not move things to secret even if phases.new-commit=secret
588
588
589 $ cp -R $TESTTMP/clean $TESTTMP/phases2
589 $ cp -R $TESTTMP/clean $TESTTMP/phases2
590 $ cd $TESTTMP/phases2
590 $ cd $TESTTMP/phases2
591 $ cat >> .hg/hgrc <<EOF
591 $ cat >> .hg/hgrc <<EOF
592 > [phases]
592 > [phases]
593 > new-commit=secret
593 > new-commit=secret
594 > EOF
594 > EOF
595 $ hg log -T '{short(node)} {phase}\n'
595 $ hg log -T '{short(node)} {phase}\n'
596 1df0d5c5a3ab draft
596 1df0d5c5a3ab draft
597 a61bcde8c529 draft
597 a61bcde8c529 draft
598 $ runsplit tip >/dev/null
598 $ runsplit tip >/dev/null
599 $ hg log -T '{short(node)} {phase}\n'
599 $ hg log -T '{short(node)} {phase}\n'
600 00eebaf8d2e2 draft
600 00eebaf8d2e2 draft
601 a09ad58faae3 draft
601 a09ad58faae3 draft
602 e704349bd21b draft
602 e704349bd21b draft
603 a61bcde8c529 draft
603 a61bcde8c529 draft
604
604
605 `hg split` with ignoreblanklines=1 does not enter an infinite loop
605 `hg split` with ignoreblanklines=1 does not enter an infinite loop
606
606
607 $ mkdir $TESTTMP/f
607 $ mkdir $TESTTMP/f
608 $ hg init $TESTTMP/f/a
608 $ hg init $TESTTMP/f/a
609 $ cd $TESTTMP/f/a
609 $ cd $TESTTMP/f/a
610 $ printf '1\n2\n3\n4\n5\n' > foo
610 $ printf '1\n2\n3\n4\n5\n' > foo
611 $ cp foo bar
611 $ cp foo bar
612 $ hg ci -qAm initial
612 $ hg ci -qAm initial
613 $ printf '1\n\n2\n3\ntest\n4\n5\n' > bar
613 $ printf '1\n\n2\n3\ntest\n4\n5\n' > bar
614 $ printf '1\n2\n3\ntest\n4\n5\n' > foo
614 $ printf '1\n2\n3\ntest\n4\n5\n' > foo
615 $ hg ci -qm splitme
615 $ hg ci -qm splitme
616 $ cat > $TESTTMP/messages <<EOF
616 $ cat > $TESTTMP/messages <<EOF
617 > split 1
617 > split 1
618 > --
618 > --
619 > split 2
619 > split 2
620 > EOF
620 > EOF
621 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
621 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
622 diff --git a/bar b/bar
622 diff --git a/bar b/bar
623 2 hunks, 2 lines changed
623 2 hunks, 2 lines changed
624 examine changes to 'bar'? [Ynesfdaq?] f
624 examine changes to 'bar'? [Ynesfdaq?] f
625
625
626 diff --git a/foo b/foo
626 diff --git a/foo b/foo
627 1 hunks, 1 lines changed
627 1 hunks, 1 lines changed
628 examine changes to 'foo'? [Ynesfdaq?] n
628 examine changes to 'foo'? [Ynesfdaq?] n
629
629
630 EDITOR: HG: Splitting dd3c45017cbf. Write commit message for the first split changeset.
630 EDITOR: HG: Splitting dd3c45017cbf. Write commit message for the first split changeset.
631 EDITOR: splitme
631 EDITOR: splitme
632 EDITOR:
632 EDITOR:
633 EDITOR:
633 EDITOR:
634 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
634 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
635 EDITOR: HG: Leave message empty to abort commit.
635 EDITOR: HG: Leave message empty to abort commit.
636 EDITOR: HG: --
636 EDITOR: HG: --
637 EDITOR: HG: user: test
637 EDITOR: HG: user: test
638 EDITOR: HG: branch 'default'
638 EDITOR: HG: branch 'default'
639 EDITOR: HG: changed bar
639 EDITOR: HG: changed bar
640 created new head
640 created new head
641 diff --git a/foo b/foo
641 diff --git a/foo b/foo
642 1 hunks, 1 lines changed
642 1 hunks, 1 lines changed
643 examine changes to 'foo'? [Ynesfdaq?] f
643 examine changes to 'foo'? [Ynesfdaq?] f
644
644
645 EDITOR: HG: Splitting dd3c45017cbf. So far it has been split into:
645 EDITOR: HG: Splitting dd3c45017cbf. So far it has been split into:
646 EDITOR: HG: - f205aea1c624: split 1
646 EDITOR: HG: - f205aea1c624: split 1
647 EDITOR: HG: Write commit message for the next split changeset.
647 EDITOR: HG: Write commit message for the next split changeset.
648 EDITOR: splitme
648 EDITOR: splitme
649 EDITOR:
649 EDITOR:
650 EDITOR:
650 EDITOR:
651 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
651 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
652 EDITOR: HG: Leave message empty to abort commit.
652 EDITOR: HG: Leave message empty to abort commit.
653 EDITOR: HG: --
653 EDITOR: HG: --
654 EDITOR: HG: user: test
654 EDITOR: HG: user: test
655 EDITOR: HG: branch 'default'
655 EDITOR: HG: branch 'default'
656 EDITOR: HG: changed foo
656 EDITOR: HG: changed foo
657 saved backup bundle to $TESTTMP/f/a/.hg/strip-backup/dd3c45017cbf-463441b5-split.hg (obsstore-off !)
657 saved backup bundle to $TESTTMP/f/a/.hg/strip-backup/dd3c45017cbf-463441b5-split.hg (obsstore-off !)
658
658
659 Let's try that again, with a slightly different set of patches, to ensure that
659 Let's try that again, with a slightly different set of patches, to ensure that
660 the ignoreblanklines thing isn't somehow position dependent.
660 the ignoreblanklines thing isn't somehow position dependent.
661
661
662 $ hg init $TESTTMP/f/b
662 $ hg init $TESTTMP/f/b
663 $ cd $TESTTMP/f/b
663 $ cd $TESTTMP/f/b
664 $ printf '1\n2\n3\n4\n5\n' > foo
664 $ printf '1\n2\n3\n4\n5\n' > foo
665 $ cp foo bar
665 $ cp foo bar
666 $ hg ci -qAm initial
666 $ hg ci -qAm initial
667 $ printf '1\n2\n3\ntest\n4\n5\n' > bar
667 $ printf '1\n2\n3\ntest\n4\n5\n' > bar
668 $ printf '1\n2\n3\ntest\n4\n\n5\n' > foo
668 $ printf '1\n2\n3\ntest\n4\n\n5\n' > foo
669 $ hg ci -qm splitme
669 $ hg ci -qm splitme
670 $ cat > $TESTTMP/messages <<EOF
670 $ cat > $TESTTMP/messages <<EOF
671 > split 1
671 > split 1
672 > --
672 > --
673 > split 2
673 > split 2
674 > EOF
674 > EOF
675 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
675 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
676 diff --git a/bar b/bar
676 diff --git a/bar b/bar
677 1 hunks, 1 lines changed
677 1 hunks, 1 lines changed
678 examine changes to 'bar'? [Ynesfdaq?] f
678 examine changes to 'bar'? [Ynesfdaq?] f
679
679
680 diff --git a/foo b/foo
680 diff --git a/foo b/foo
681 2 hunks, 2 lines changed
681 2 hunks, 2 lines changed
682 examine changes to 'foo'? [Ynesfdaq?] n
682 examine changes to 'foo'? [Ynesfdaq?] n
683
683
684 EDITOR: HG: Splitting 904c80b40a4a. Write commit message for the first split changeset.
684 EDITOR: HG: Splitting 904c80b40a4a. Write commit message for the first split changeset.
685 EDITOR: splitme
685 EDITOR: splitme
686 EDITOR:
686 EDITOR:
687 EDITOR:
687 EDITOR:
688 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
688 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
689 EDITOR: HG: Leave message empty to abort commit.
689 EDITOR: HG: Leave message empty to abort commit.
690 EDITOR: HG: --
690 EDITOR: HG: --
691 EDITOR: HG: user: test
691 EDITOR: HG: user: test
692 EDITOR: HG: branch 'default'
692 EDITOR: HG: branch 'default'
693 EDITOR: HG: changed bar
693 EDITOR: HG: changed bar
694 created new head
694 created new head
695 diff --git a/foo b/foo
695 diff --git a/foo b/foo
696 2 hunks, 2 lines changed
696 2 hunks, 2 lines changed
697 examine changes to 'foo'? [Ynesfdaq?] f
697 examine changes to 'foo'? [Ynesfdaq?] f
698
698
699 EDITOR: HG: Splitting 904c80b40a4a. So far it has been split into:
699 EDITOR: HG: Splitting 904c80b40a4a. So far it has been split into:
700 EDITOR: HG: - ffecf40fa954: split 1
700 EDITOR: HG: - ffecf40fa954: split 1
701 EDITOR: HG: Write commit message for the next split changeset.
701 EDITOR: HG: Write commit message for the next split changeset.
702 EDITOR: splitme
702 EDITOR: splitme
703 EDITOR:
703 EDITOR:
704 EDITOR:
704 EDITOR:
705 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
705 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
706 EDITOR: HG: Leave message empty to abort commit.
706 EDITOR: HG: Leave message empty to abort commit.
707 EDITOR: HG: --
707 EDITOR: HG: --
708 EDITOR: HG: user: test
708 EDITOR: HG: user: test
709 EDITOR: HG: branch 'default'
709 EDITOR: HG: branch 'default'
710 EDITOR: HG: changed foo
710 EDITOR: HG: changed foo
711 saved backup bundle to $TESTTMP/f/b/.hg/strip-backup/904c80b40a4a-47fb907f-split.hg (obsstore-off !)
711 saved backup bundle to $TESTTMP/f/b/.hg/strip-backup/904c80b40a4a-47fb907f-split.hg (obsstore-off !)
712
712
713
713
714 Testing the case in split when committing flag-only file changes (issue5864)
714 Testing the case in split when committing flag-only file changes (issue5864)
715 ---------------------------------------------------------------------------
715 ---------------------------------------------------------------------------
716 $ hg init $TESTTMP/issue5864
716 $ hg init $TESTTMP/issue5864
717 $ cd $TESTTMP/issue5864
717 $ cd $TESTTMP/issue5864
718 $ echo foo > foo
718 $ echo foo > foo
719 $ hg add foo
719 $ hg add foo
720 $ hg ci -m "initial"
720 $ hg ci -m "initial"
721 $ hg import -q --bypass -m "make executable" - <<EOF
721 $ hg import -q --bypass -m "make executable" - <<EOF
722 > diff --git a/foo b/foo
722 > diff --git a/foo b/foo
723 > old mode 100644
723 > old mode 100644
724 > new mode 100755
724 > new mode 100755
725 > EOF
725 > EOF
726 $ hg up -q
726 $ hg up -q
727
727
728 $ hg glog
728 $ hg glog
729 @ 1:3a2125f0f4cb make executable
729 @ 1:3a2125f0f4cb make executable
730 |
730 |
731 o 0:51f273a58d82 initial
731 o 0:51f273a58d82 initial
732
732
733
733
734 #if no-windows
734 #if no-windows
735 $ printf 'y\ny\ny\n' | hg split
735 $ cat > $TESTTMP/messages <<EOF
736 diff --git a/foo b/foo
736 > split 1
737 old mode 100644
737 > EOF
738 new mode 100755
738 $ printf 'y\n' | hg split
739 examine changes to 'foo'? [Ynesfdaq?] y
740
741 no changes to record
742 diff --git a/foo b/foo
739 diff --git a/foo b/foo
743 old mode 100644
740 old mode 100644
744 new mode 100755
741 new mode 100755
745 examine changes to 'foo'? [Ynesfdaq?] y
742 examine changes to 'foo'? [Ynesfdaq?] y
746
743
747 no changes to record
744 EDITOR: HG: Splitting 3a2125f0f4cb. Write commit message for the first split changeset.
748 diff --git a/foo b/foo
745 EDITOR: make executable
749 old mode 100644
746 EDITOR:
750 new mode 100755
747 EDITOR:
751 examine changes to 'foo'? [Ynesfdaq?] y
748 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
749 EDITOR: HG: Leave message empty to abort commit.
750 EDITOR: HG: --
751 EDITOR: HG: user: test
752 EDITOR: HG: branch 'default'
753 EDITOR: HG: changed foo
754 created new head
755 saved backup bundle to $TESTTMP/issue5864/.hg/strip-backup/3a2125f0f4cb-629e4432-split.hg (obsstore-off !)
752
756
753 no changes to record
757 $ hg log -G -T "{node|short} {desc}\n"
754 diff --git a/foo b/foo
758 @ b154670c87da split 1
755 old mode 100644
759 |
756 new mode 100755
760 o 51f273a58d82 initial
757 examine changes to 'foo'? [Ynesfdaq?] abort: response expected
761
758 [255]
759 #else
762 #else
760
763
761 TODO: Fix this on Windows. See issue 2020 and 5883
764 TODO: Fix this on Windows. See issue 2020 and 5883
762
765
763 $ printf 'y\ny\ny\n' | hg split
766 $ printf 'y\ny\ny\n' | hg split
764 abort: cannot split an empty revision
767 abort: cannot split an empty revision
765 [255]
768 [255]
766 #endif
769 #endif
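The issue5864 case above splits a changeset whose only change is the executable bit on foo (old mode 100644, new mode 100755), i.e. a flag-only change with identical file data. As a rough sketch of what such a change looks like through the changectx API (an illustrative helper assuming a repo object; not part of Mercurial's API):

    def flagonlyfiles(repo, rev):
        # files touched by rev whose exec/link flags changed but whose
        # contents are byte-for-byte identical to the first parent
        ctx = repo[rev]
        pctx = ctx.p1()
        result = []
        for f in ctx.files():
            if f in ctx and f in pctx:
                samedata = ctx[f].data() == pctx[f].data()
                sameflags = ctx[f].flags() == pctx[f].flags()
                if samedata and not sameflags:
                    result.append(f)
        return result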