patch: when importing from email, RFC2047-decode From/Subject headers...
Julien Cristau
r28341:8286f551 default
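Background for the change: RFC 2047 lets non-ASCII text appear in mail headers as "encoded words" such as =?utf-8?q?...?=, and before this commit the raw encoded form ended up in the From/Subject fields when importing a patch from email. Below is a minimal sketch (Python 2, as used by this codebase) of the decoding the new helper performs; the header value is illustrative, not taken from the patch.

```python
# Minimal sketch (Python 2) of RFC 2047 header decoding; the header
# value below is illustrative.
import email.Header

raw = '=?utf-8?q?Andr=C3=A9_Dupont?= <andre@example.com>'
parts = []
for part, charset in email.Header.decode_header(raw):
    # charset is None for plain us-ascii words
    parts.append(part.decode(charset or 'UTF-8'))
print u' '.join(parts).encode('UTF-8')
```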
@@ -1,334 +1,352 b''
1 # mail.py - mail sending bits for mercurial
1 # mail.py - mail sending bits for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import email
10 import email
11 import os
11 import os
12 import quopri
12 import quopri
13 import smtplib
13 import smtplib
14 import socket
14 import socket
15 import sys
15 import sys
16 import time
16 import time
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 encoding,
20 encoding,
21 error,
21 error,
22 sslutil,
22 sslutil,
23 util,
23 util,
24 )
24 )
25
25
26 _oldheaderinit = email.Header.Header.__init__
26 _oldheaderinit = email.Header.Header.__init__
27 def _unifiedheaderinit(self, *args, **kw):
27 def _unifiedheaderinit(self, *args, **kw):
28 """
28 """
29 Python 2.7 introduces a backwards incompatible change
29 Python 2.7 introduces a backwards incompatible change
30 (Python issue1974, r70772) in email.Generator.Generator code:
30 (Python issue1974, r70772) in email.Generator.Generator code:
31 pre-2.7 code passed "continuation_ws='\t'" to the Header
31 pre-2.7 code passed "continuation_ws='\t'" to the Header
32 constructor, and 2.7 removed this parameter.
32 constructor, and 2.7 removed this parameter.
33
33
34 Default argument is continuation_ws=' ', which means that the
34 Default argument is continuation_ws=' ', which means that the
35 behavior is different in <2.7 and 2.7
35 behavior is different in <2.7 and 2.7
36
36
37 We consider the 2.7 behavior to be preferable, but need
37 We consider the 2.7 behavior to be preferable, but need
38 to have an unified behavior for versions 2.4 to 2.7
38 to have an unified behavior for versions 2.4 to 2.7
39 """
39 """
40 # override continuation_ws
40 # override continuation_ws
41 kw['continuation_ws'] = ' '
41 kw['continuation_ws'] = ' '
42 _oldheaderinit(self, *args, **kw)
42 _oldheaderinit(self, *args, **kw)
43
43
44 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
44 email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
45
45
46 class STARTTLS(smtplib.SMTP):
46 class STARTTLS(smtplib.SMTP):
47 '''Derived class to verify the peer certificate for STARTTLS.
47 '''Derived class to verify the peer certificate for STARTTLS.
48
48
49 This class allows to pass any keyword arguments to SSL socket creation.
49 This class allows to pass any keyword arguments to SSL socket creation.
50 '''
50 '''
51 def __init__(self, sslkwargs, **kwargs):
51 def __init__(self, sslkwargs, **kwargs):
52 smtplib.SMTP.__init__(self, **kwargs)
52 smtplib.SMTP.__init__(self, **kwargs)
53 self._sslkwargs = sslkwargs
53 self._sslkwargs = sslkwargs
54
54
55 def starttls(self, keyfile=None, certfile=None):
55 def starttls(self, keyfile=None, certfile=None):
56 if not self.has_extn("starttls"):
56 if not self.has_extn("starttls"):
57 msg = "STARTTLS extension not supported by server"
57 msg = "STARTTLS extension not supported by server"
58 raise smtplib.SMTPException(msg)
58 raise smtplib.SMTPException(msg)
59 (resp, reply) = self.docmd("STARTTLS")
59 (resp, reply) = self.docmd("STARTTLS")
60 if resp == 220:
60 if resp == 220:
61 self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile,
61 self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile,
62 **self._sslkwargs)
62 **self._sslkwargs)
63 self.file = smtplib.SSLFakeFile(self.sock)
63 self.file = smtplib.SSLFakeFile(self.sock)
64 self.helo_resp = None
64 self.helo_resp = None
65 self.ehlo_resp = None
65 self.ehlo_resp = None
66 self.esmtp_features = {}
66 self.esmtp_features = {}
67 self.does_esmtp = 0
67 self.does_esmtp = 0
68 return (resp, reply)
68 return (resp, reply)
69
69
70 class SMTPS(smtplib.SMTP):
70 class SMTPS(smtplib.SMTP):
71 '''Derived class to verify the peer certificate for SMTPS.
71 '''Derived class to verify the peer certificate for SMTPS.
72
72
73 This class allows to pass any keyword arguments to SSL socket creation.
73 This class allows to pass any keyword arguments to SSL socket creation.
74 '''
74 '''
75 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
75 def __init__(self, sslkwargs, keyfile=None, certfile=None, **kwargs):
76 self.keyfile = keyfile
76 self.keyfile = keyfile
77 self.certfile = certfile
77 self.certfile = certfile
78 smtplib.SMTP.__init__(self, **kwargs)
78 smtplib.SMTP.__init__(self, **kwargs)
79 self.default_port = smtplib.SMTP_SSL_PORT
79 self.default_port = smtplib.SMTP_SSL_PORT
80 self._sslkwargs = sslkwargs
80 self._sslkwargs = sslkwargs
81
81
82 def _get_socket(self, host, port, timeout):
82 def _get_socket(self, host, port, timeout):
83 if self.debuglevel > 0:
83 if self.debuglevel > 0:
84 print('connect:', (host, port), file=sys.stderr)
84 print('connect:', (host, port), file=sys.stderr)
85 new_socket = socket.create_connection((host, port), timeout)
85 new_socket = socket.create_connection((host, port), timeout)
86 new_socket = sslutil.wrapsocket(new_socket,
86 new_socket = sslutil.wrapsocket(new_socket,
87 self.keyfile, self.certfile,
87 self.keyfile, self.certfile,
88 **self._sslkwargs)
88 **self._sslkwargs)
89 self.file = smtplib.SSLFakeFile(new_socket)
89 self.file = smtplib.SSLFakeFile(new_socket)
90 return new_socket
90 return new_socket
91
91
92 def _smtp(ui):
92 def _smtp(ui):
93 '''build an smtp connection and return a function to send mail'''
93 '''build an smtp connection and return a function to send mail'''
94 local_hostname = ui.config('smtp', 'local_hostname')
94 local_hostname = ui.config('smtp', 'local_hostname')
95 tls = ui.config('smtp', 'tls', 'none')
95 tls = ui.config('smtp', 'tls', 'none')
96 # backward compatible: when tls = true, we use starttls.
96 # backward compatible: when tls = true, we use starttls.
97 starttls = tls == 'starttls' or util.parsebool(tls)
97 starttls = tls == 'starttls' or util.parsebool(tls)
98 smtps = tls == 'smtps'
98 smtps = tls == 'smtps'
99 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
99 if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
100 raise error.Abort(_("can't use TLS: Python SSL support not installed"))
100 raise error.Abort(_("can't use TLS: Python SSL support not installed"))
101 mailhost = ui.config('smtp', 'host')
101 mailhost = ui.config('smtp', 'host')
102 if not mailhost:
102 if not mailhost:
103 raise error.Abort(_('smtp.host not configured - cannot send mail'))
103 raise error.Abort(_('smtp.host not configured - cannot send mail'))
104 verifycert = ui.config('smtp', 'verifycert', 'strict')
104 verifycert = ui.config('smtp', 'verifycert', 'strict')
105 if verifycert not in ['strict', 'loose']:
105 if verifycert not in ['strict', 'loose']:
106 if util.parsebool(verifycert) is not False:
106 if util.parsebool(verifycert) is not False:
107 raise error.Abort(_('invalid smtp.verifycert configuration: %s')
107 raise error.Abort(_('invalid smtp.verifycert configuration: %s')
108 % (verifycert))
108 % (verifycert))
109 verifycert = False
109 verifycert = False
110 if (starttls or smtps) and verifycert:
110 if (starttls or smtps) and verifycert:
111 sslkwargs = sslutil.sslkwargs(ui, mailhost)
111 sslkwargs = sslutil.sslkwargs(ui, mailhost)
112 else:
112 else:
113 # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs()
113 # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs()
114 sslkwargs = {'ui': ui}
114 sslkwargs = {'ui': ui}
115 if smtps:
115 if smtps:
116 ui.note(_('(using smtps)\n'))
116 ui.note(_('(using smtps)\n'))
117 s = SMTPS(sslkwargs, local_hostname=local_hostname)
117 s = SMTPS(sslkwargs, local_hostname=local_hostname)
118 elif starttls:
118 elif starttls:
119 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
119 s = STARTTLS(sslkwargs, local_hostname=local_hostname)
120 else:
120 else:
121 s = smtplib.SMTP(local_hostname=local_hostname)
121 s = smtplib.SMTP(local_hostname=local_hostname)
122 if smtps:
122 if smtps:
123 defaultport = 465
123 defaultport = 465
124 else:
124 else:
125 defaultport = 25
125 defaultport = 25
126 mailport = util.getport(ui.config('smtp', 'port', defaultport))
126 mailport = util.getport(ui.config('smtp', 'port', defaultport))
127 ui.note(_('sending mail: smtp host %s, port %d\n') %
127 ui.note(_('sending mail: smtp host %s, port %d\n') %
128 (mailhost, mailport))
128 (mailhost, mailport))
129 s.connect(host=mailhost, port=mailport)
129 s.connect(host=mailhost, port=mailport)
130 if starttls:
130 if starttls:
131 ui.note(_('(using starttls)\n'))
131 ui.note(_('(using starttls)\n'))
132 s.ehlo()
132 s.ehlo()
133 s.starttls()
133 s.starttls()
134 s.ehlo()
134 s.ehlo()
135 if (starttls or smtps) and verifycert:
135 if (starttls or smtps) and verifycert:
136 ui.note(_('(verifying remote certificate)\n'))
136 ui.note(_('(verifying remote certificate)\n'))
137 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
137 sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
138 username = ui.config('smtp', 'username')
138 username = ui.config('smtp', 'username')
139 password = ui.config('smtp', 'password')
139 password = ui.config('smtp', 'password')
140 if username and not password:
140 if username and not password:
141 password = ui.getpass()
141 password = ui.getpass()
142 if username and password:
142 if username and password:
143 ui.note(_('(authenticating to mail server as %s)\n') %
143 ui.note(_('(authenticating to mail server as %s)\n') %
144 (username))
144 (username))
145 try:
145 try:
146 s.login(username, password)
146 s.login(username, password)
147 except smtplib.SMTPException as inst:
147 except smtplib.SMTPException as inst:
148 raise error.Abort(inst)
148 raise error.Abort(inst)
149
149
150 def send(sender, recipients, msg):
150 def send(sender, recipients, msg):
151 try:
151 try:
152 return s.sendmail(sender, recipients, msg)
152 return s.sendmail(sender, recipients, msg)
153 except smtplib.SMTPRecipientsRefused as inst:
153 except smtplib.SMTPRecipientsRefused as inst:
154 recipients = [r[1] for r in inst.recipients.values()]
154 recipients = [r[1] for r in inst.recipients.values()]
155 raise error.Abort('\n' + '\n'.join(recipients))
155 raise error.Abort('\n' + '\n'.join(recipients))
156 except smtplib.SMTPException as inst:
156 except smtplib.SMTPException as inst:
157 raise error.Abort(inst)
157 raise error.Abort(inst)
158
158
159 return send
159 return send
160
160
161 def _sendmail(ui, sender, recipients, msg):
161 def _sendmail(ui, sender, recipients, msg):
162 '''send mail using sendmail.'''
162 '''send mail using sendmail.'''
163 program = ui.config('email', 'method', 'smtp')
163 program = ui.config('email', 'method', 'smtp')
164 cmdline = '%s -f %s %s' % (program, util.email(sender),
164 cmdline = '%s -f %s %s' % (program, util.email(sender),
165 ' '.join(map(util.email, recipients)))
165 ' '.join(map(util.email, recipients)))
166 ui.note(_('sending mail: %s\n') % cmdline)
166 ui.note(_('sending mail: %s\n') % cmdline)
167 fp = util.popen(cmdline, 'w')
167 fp = util.popen(cmdline, 'w')
168 fp.write(msg)
168 fp.write(msg)
169 ret = fp.close()
169 ret = fp.close()
170 if ret:
170 if ret:
171 raise error.Abort('%s %s' % (
171 raise error.Abort('%s %s' % (
172 os.path.basename(program.split(None, 1)[0]),
172 os.path.basename(program.split(None, 1)[0]),
173 util.explainexit(ret)[0]))
173 util.explainexit(ret)[0]))
174
174
175 def _mbox(mbox, sender, recipients, msg):
175 def _mbox(mbox, sender, recipients, msg):
176 '''write mails to mbox'''
176 '''write mails to mbox'''
177 fp = open(mbox, 'ab+')
177 fp = open(mbox, 'ab+')
178 # Should be time.asctime(), but Windows prints 2-characters day
178 # Should be time.asctime(), but Windows prints 2-characters day
179 # of month instead of one. Make them print the same thing.
179 # of month instead of one. Make them print the same thing.
180 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
180 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
181 fp.write('From %s %s\n' % (sender, date))
181 fp.write('From %s %s\n' % (sender, date))
182 fp.write(msg)
182 fp.write(msg)
183 fp.write('\n\n')
183 fp.write('\n\n')
184 fp.close()
184 fp.close()
185
185
186 def connect(ui, mbox=None):
186 def connect(ui, mbox=None):
187 '''make a mail connection. return a function to send mail.
187 '''make a mail connection. return a function to send mail.
188 call as sendmail(sender, list-of-recipients, msg).'''
188 call as sendmail(sender, list-of-recipients, msg).'''
189 if mbox:
189 if mbox:
190 open(mbox, 'wb').close()
190 open(mbox, 'wb').close()
191 return lambda s, r, m: _mbox(mbox, s, r, m)
191 return lambda s, r, m: _mbox(mbox, s, r, m)
192 if ui.config('email', 'method', 'smtp') == 'smtp':
192 if ui.config('email', 'method', 'smtp') == 'smtp':
193 return _smtp(ui)
193 return _smtp(ui)
194 return lambda s, r, m: _sendmail(ui, s, r, m)
194 return lambda s, r, m: _sendmail(ui, s, r, m)
195
195
196 def sendmail(ui, sender, recipients, msg, mbox=None):
196 def sendmail(ui, sender, recipients, msg, mbox=None):
197 send = connect(ui, mbox=mbox)
197 send = connect(ui, mbox=mbox)
198 return send(sender, recipients, msg)
198 return send(sender, recipients, msg)
199
199
200 def validateconfig(ui):
200 def validateconfig(ui):
201 '''determine if we have enough config data to try sending email.'''
201 '''determine if we have enough config data to try sending email.'''
202 method = ui.config('email', 'method', 'smtp')
202 method = ui.config('email', 'method', 'smtp')
203 if method == 'smtp':
203 if method == 'smtp':
204 if not ui.config('smtp', 'host'):
204 if not ui.config('smtp', 'host'):
205 raise error.Abort(_('smtp specified as email transport, '
205 raise error.Abort(_('smtp specified as email transport, '
206 'but no smtp host configured'))
206 'but no smtp host configured'))
207 else:
207 else:
208 if not util.findexe(method):
208 if not util.findexe(method):
209 raise error.Abort(_('%r specified as email transport, '
209 raise error.Abort(_('%r specified as email transport, '
210 'but not in PATH') % method)
210 'but not in PATH') % method)
211
211
212 def mimetextpatch(s, subtype='plain', display=False):
212 def mimetextpatch(s, subtype='plain', display=False):
213 '''Return MIME message suitable for a patch.
213 '''Return MIME message suitable for a patch.
214 Charset will be detected as utf-8 or (possibly fake) us-ascii.
214 Charset will be detected as utf-8 or (possibly fake) us-ascii.
215 Transfer encodings will be used if necessary.'''
215 Transfer encodings will be used if necessary.'''
216
216
217 cs = 'us-ascii'
217 cs = 'us-ascii'
218 if not display:
218 if not display:
219 try:
219 try:
220 s.decode('us-ascii')
220 s.decode('us-ascii')
221 except UnicodeDecodeError:
221 except UnicodeDecodeError:
222 try:
222 try:
223 s.decode('utf-8')
223 s.decode('utf-8')
224 cs = 'utf-8'
224 cs = 'utf-8'
225 except UnicodeDecodeError:
225 except UnicodeDecodeError:
226 # We'll go with us-ascii as a fallback.
226 # We'll go with us-ascii as a fallback.
227 pass
227 pass
228
228
229 return mimetextqp(s, subtype, cs)
229 return mimetextqp(s, subtype, cs)
230
230
231 def mimetextqp(body, subtype, charset):
231 def mimetextqp(body, subtype, charset):
232 '''Return MIME message.
232 '''Return MIME message.
233 Quoted-printable transfer encoding will be used if necessary.
233 Quoted-printable transfer encoding will be used if necessary.
234 '''
234 '''
235 enc = None
235 enc = None
236 for line in body.splitlines():
236 for line in body.splitlines():
237 if len(line) > 950:
237 if len(line) > 950:
238 body = quopri.encodestring(body)
238 body = quopri.encodestring(body)
239 enc = "quoted-printable"
239 enc = "quoted-printable"
240 break
240 break
241
241
242 msg = email.MIMEText.MIMEText(body, subtype, charset)
242 msg = email.MIMEText.MIMEText(body, subtype, charset)
243 if enc:
243 if enc:
244 del msg['Content-Transfer-Encoding']
244 del msg['Content-Transfer-Encoding']
245 msg['Content-Transfer-Encoding'] = enc
245 msg['Content-Transfer-Encoding'] = enc
246 return msg
246 return msg
247
247
248 def _charsets(ui):
248 def _charsets(ui):
249 '''Obtains charsets to send mail parts not containing patches.'''
249 '''Obtains charsets to send mail parts not containing patches.'''
250 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
250 charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
251 fallbacks = [encoding.fallbackencoding.lower(),
251 fallbacks = [encoding.fallbackencoding.lower(),
252 encoding.encoding.lower(), 'utf-8']
252 encoding.encoding.lower(), 'utf-8']
253 for cs in fallbacks: # find unique charsets while keeping order
253 for cs in fallbacks: # find unique charsets while keeping order
254 if cs not in charsets:
254 if cs not in charsets:
255 charsets.append(cs)
255 charsets.append(cs)
256 return [cs for cs in charsets if not cs.endswith('ascii')]
256 return [cs for cs in charsets if not cs.endswith('ascii')]
257
257
258 def _encode(ui, s, charsets):
258 def _encode(ui, s, charsets):
259 '''Returns (converted) string, charset tuple.
259 '''Returns (converted) string, charset tuple.
260 Finds out best charset by cycling through sendcharsets in descending
260 Finds out best charset by cycling through sendcharsets in descending
261 order. Tries both encoding and fallbackencoding for input. Only as
261 order. Tries both encoding and fallbackencoding for input. Only as
262 last resort send as is in fake ascii.
262 last resort send as is in fake ascii.
263 Caveat: Do not use for mail parts containing patches!'''
263 Caveat: Do not use for mail parts containing patches!'''
264 try:
264 try:
265 s.decode('ascii')
265 s.decode('ascii')
266 except UnicodeDecodeError:
266 except UnicodeDecodeError:
267 sendcharsets = charsets or _charsets(ui)
267 sendcharsets = charsets or _charsets(ui)
268 for ics in (encoding.encoding, encoding.fallbackencoding):
268 for ics in (encoding.encoding, encoding.fallbackencoding):
269 try:
269 try:
270 u = s.decode(ics)
270 u = s.decode(ics)
271 except UnicodeDecodeError:
271 except UnicodeDecodeError:
272 continue
272 continue
273 for ocs in sendcharsets:
273 for ocs in sendcharsets:
274 try:
274 try:
275 return u.encode(ocs), ocs
275 return u.encode(ocs), ocs
276 except UnicodeEncodeError:
276 except UnicodeEncodeError:
277 pass
277 pass
278 except LookupError:
278 except LookupError:
279 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
279 ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
280 # if ascii, or all conversion attempts fail, send (broken) ascii
280 # if ascii, or all conversion attempts fail, send (broken) ascii
281 return s, 'us-ascii'
281 return s, 'us-ascii'
282
282
283 def headencode(ui, s, charsets=None, display=False):
283 def headencode(ui, s, charsets=None, display=False):
284 '''Returns RFC-2047 compliant header from given string.'''
284 '''Returns RFC-2047 compliant header from given string.'''
285 if not display:
285 if not display:
286 # split into words?
286 # split into words?
287 s, cs = _encode(ui, s, charsets)
287 s, cs = _encode(ui, s, charsets)
288 return str(email.Header.Header(s, cs))
288 return str(email.Header.Header(s, cs))
289 return s
289 return s
290
290
291 def _addressencode(ui, name, addr, charsets=None):
291 def _addressencode(ui, name, addr, charsets=None):
292 name = headencode(ui, name, charsets)
292 name = headencode(ui, name, charsets)
293 try:
293 try:
294 acc, dom = addr.split('@')
294 acc, dom = addr.split('@')
295 acc = acc.encode('ascii')
295 acc = acc.encode('ascii')
296 dom = dom.decode(encoding.encoding).encode('idna')
296 dom = dom.decode(encoding.encoding).encode('idna')
297 addr = '%s@%s' % (acc, dom)
297 addr = '%s@%s' % (acc, dom)
298 except UnicodeDecodeError:
298 except UnicodeDecodeError:
299 raise error.Abort(_('invalid email address: %s') % addr)
299 raise error.Abort(_('invalid email address: %s') % addr)
300 except ValueError:
300 except ValueError:
301 try:
301 try:
302 # too strict?
302 # too strict?
303 addr = addr.encode('ascii')
303 addr = addr.encode('ascii')
304 except UnicodeDecodeError:
304 except UnicodeDecodeError:
305 raise error.Abort(_('invalid local address: %s') % addr)
305 raise error.Abort(_('invalid local address: %s') % addr)
306 return email.Utils.formataddr((name, addr))
306 return email.Utils.formataddr((name, addr))
307
307
308 def addressencode(ui, address, charsets=None, display=False):
308 def addressencode(ui, address, charsets=None, display=False):
309 '''Turns address into RFC-2047 compliant header.'''
309 '''Turns address into RFC-2047 compliant header.'''
310 if display or not address:
310 if display or not address:
311 return address or ''
311 return address or ''
312 name, addr = email.Utils.parseaddr(address)
312 name, addr = email.Utils.parseaddr(address)
313 return _addressencode(ui, name, addr, charsets)
313 return _addressencode(ui, name, addr, charsets)
314
314
315 def addrlistencode(ui, addrs, charsets=None, display=False):
315 def addrlistencode(ui, addrs, charsets=None, display=False):
316 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
316 '''Turns a list of addresses into a list of RFC-2047 compliant headers.
317 A single element of input list may contain multiple addresses, but output
317 A single element of input list may contain multiple addresses, but output
318 always has one address per item'''
318 always has one address per item'''
319 if display:
319 if display:
320 return [a.strip() for a in addrs if a.strip()]
320 return [a.strip() for a in addrs if a.strip()]
321
321
322 result = []
322 result = []
323 for name, addr in email.Utils.getaddresses(addrs):
323 for name, addr in email.Utils.getaddresses(addrs):
324 if name or addr:
324 if name or addr:
325 result.append(_addressencode(ui, name, addr, charsets))
325 result.append(_addressencode(ui, name, addr, charsets))
326 return result
326 return result
327
327
328 def mimeencode(ui, s, charsets=None, display=False):
328 def mimeencode(ui, s, charsets=None, display=False):
329 '''creates mime text object, encodes it if needed, and sets
329 '''creates mime text object, encodes it if needed, and sets
330 charset and transfer-encoding accordingly.'''
330 charset and transfer-encoding accordingly.'''
331 cs = 'us-ascii'
331 cs = 'us-ascii'
332 if not display:
332 if not display:
333 s, cs = _encode(ui, s, charsets)
333 s, cs = _encode(ui, s, charsets)
334 return mimetextqp(s, 'plain', cs)
334 return mimetextqp(s, 'plain', cs)
335
336 def headdecode(s):
337 '''Decodes RFC-2047 header'''
338 uparts = []
339 for part, charset in email.Header.decode_header(s):
340 if charset is not None:
341 try:
342 uparts.append(part.decode(charset))
343 continue
344 except UnicodeDecodeError:
345 pass
346 try:
347 uparts.append(part.decode('UTF-8'))
348 continue
349 except UnicodeDecodeError:
350 pass
351 uparts.append(part.decode('ISO-8859-1'))
352 return encoding.tolocal(u' '.join(uparts).encode('UTF-8'))
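For reference, a minimal usage sketch of the headdecode helper added above. It assumes a Python 2 environment where this mercurial package is importable; the Subject value is illustrative.

```python
# Minimal usage sketch of mail.headdecode (assumes this mercurial
# package is importable; the Subject value is illustrative).
from mercurial import mail

subject = '=?iso-8859-1?q?fix_acc=E8s_check?='
print mail.headdecode(subject)  # -> "fix accès check" in the local encoding
```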
@@ -1,2592 +1,2593 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import cStringIO
11 import cStringIO
12 import collections
12 import collections
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mdiff,
35 mdiff,
35 pathutil,
36 pathutil,
36 scmutil,
37 scmutil,
37 util,
38 util,
38 )
39 )
39
40
40 gitre = re.compile('diff --git a/(.*) b/(.*)')
41 gitre = re.compile('diff --git a/(.*) b/(.*)')
41 tabsplitter = re.compile(r'(\t+|[^\t]+)')
42 tabsplitter = re.compile(r'(\t+|[^\t]+)')
42
43
43 class PatchError(Exception):
44 class PatchError(Exception):
44 pass
45 pass
45
46
46
47
47 # public functions
48 # public functions
48
49
49 def split(stream):
50 def split(stream):
50 '''return an iterator of individual patches from a stream'''
51 '''return an iterator of individual patches from a stream'''
51 def isheader(line, inheader):
52 def isheader(line, inheader):
52 if inheader and line[0] in (' ', '\t'):
53 if inheader and line[0] in (' ', '\t'):
53 # continuation
54 # continuation
54 return True
55 return True
55 if line[0] in (' ', '-', '+'):
56 if line[0] in (' ', '-', '+'):
56 # diff line - don't check for header pattern in there
57 # diff line - don't check for header pattern in there
57 return False
58 return False
58 l = line.split(': ', 1)
59 l = line.split(': ', 1)
59 return len(l) == 2 and ' ' not in l[0]
60 return len(l) == 2 and ' ' not in l[0]
60
61
61 def chunk(lines):
62 def chunk(lines):
62 return cStringIO.StringIO(''.join(lines))
63 return cStringIO.StringIO(''.join(lines))
63
64
64 def hgsplit(stream, cur):
65 def hgsplit(stream, cur):
65 inheader = True
66 inheader = True
66
67
67 for line in stream:
68 for line in stream:
68 if not line.strip():
69 if not line.strip():
69 inheader = False
70 inheader = False
70 if not inheader and line.startswith('# HG changeset patch'):
71 if not inheader and line.startswith('# HG changeset patch'):
71 yield chunk(cur)
72 yield chunk(cur)
72 cur = []
73 cur = []
73 inheader = True
74 inheader = True
74
75
75 cur.append(line)
76 cur.append(line)
76
77
77 if cur:
78 if cur:
78 yield chunk(cur)
79 yield chunk(cur)
79
80
80 def mboxsplit(stream, cur):
81 def mboxsplit(stream, cur):
81 for line in stream:
82 for line in stream:
82 if line.startswith('From '):
83 if line.startswith('From '):
83 for c in split(chunk(cur[1:])):
84 for c in split(chunk(cur[1:])):
84 yield c
85 yield c
85 cur = []
86 cur = []
86
87
87 cur.append(line)
88 cur.append(line)
88
89
89 if cur:
90 if cur:
90 for c in split(chunk(cur[1:])):
91 for c in split(chunk(cur[1:])):
91 yield c
92 yield c
92
93
93 def mimesplit(stream, cur):
94 def mimesplit(stream, cur):
94 def msgfp(m):
95 def msgfp(m):
95 fp = cStringIO.StringIO()
96 fp = cStringIO.StringIO()
96 g = email.Generator.Generator(fp, mangle_from_=False)
97 g = email.Generator.Generator(fp, mangle_from_=False)
97 g.flatten(m)
98 g.flatten(m)
98 fp.seek(0)
99 fp.seek(0)
99 return fp
100 return fp
100
101
101 for line in stream:
102 for line in stream:
102 cur.append(line)
103 cur.append(line)
103 c = chunk(cur)
104 c = chunk(cur)
104
105
105 m = email.Parser.Parser().parse(c)
106 m = email.Parser.Parser().parse(c)
106 if not m.is_multipart():
107 if not m.is_multipart():
107 yield msgfp(m)
108 yield msgfp(m)
108 else:
109 else:
109 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
110 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
110 for part in m.walk():
111 for part in m.walk():
111 ct = part.get_content_type()
112 ct = part.get_content_type()
112 if ct not in ok_types:
113 if ct not in ok_types:
113 continue
114 continue
114 yield msgfp(part)
115 yield msgfp(part)
115
116
116 def headersplit(stream, cur):
117 def headersplit(stream, cur):
117 inheader = False
118 inheader = False
118
119
119 for line in stream:
120 for line in stream:
120 if not inheader and isheader(line, inheader):
121 if not inheader and isheader(line, inheader):
121 yield chunk(cur)
122 yield chunk(cur)
122 cur = []
123 cur = []
123 inheader = True
124 inheader = True
124 if inheader and not isheader(line, inheader):
125 if inheader and not isheader(line, inheader):
125 inheader = False
126 inheader = False
126
127
127 cur.append(line)
128 cur.append(line)
128
129
129 if cur:
130 if cur:
130 yield chunk(cur)
131 yield chunk(cur)
131
132
132 def remainder(cur):
133 def remainder(cur):
133 yield chunk(cur)
134 yield chunk(cur)
134
135
135 class fiter(object):
136 class fiter(object):
136 def __init__(self, fp):
137 def __init__(self, fp):
137 self.fp = fp
138 self.fp = fp
138
139
139 def __iter__(self):
140 def __iter__(self):
140 return self
141 return self
141
142
142 def next(self):
143 def next(self):
143 l = self.fp.readline()
144 l = self.fp.readline()
144 if not l:
145 if not l:
145 raise StopIteration
146 raise StopIteration
146 return l
147 return l
147
148
148 inheader = False
149 inheader = False
149 cur = []
150 cur = []
150
151
151 mimeheaders = ['content-type']
152 mimeheaders = ['content-type']
152
153
153 if not util.safehasattr(stream, 'next'):
154 if not util.safehasattr(stream, 'next'):
154 # http responses, for example, have readline but not next
155 # http responses, for example, have readline but not next
155 stream = fiter(stream)
156 stream = fiter(stream)
156
157
157 for line in stream:
158 for line in stream:
158 cur.append(line)
159 cur.append(line)
159 if line.startswith('# HG changeset patch'):
160 if line.startswith('# HG changeset patch'):
160 return hgsplit(stream, cur)
161 return hgsplit(stream, cur)
161 elif line.startswith('From '):
162 elif line.startswith('From '):
162 return mboxsplit(stream, cur)
163 return mboxsplit(stream, cur)
163 elif isheader(line, inheader):
164 elif isheader(line, inheader):
164 inheader = True
165 inheader = True
165 if line.split(':', 1)[0].lower() in mimeheaders:
166 if line.split(':', 1)[0].lower() in mimeheaders:
166 # let email parser handle this
167 # let email parser handle this
167 return mimesplit(stream, cur)
168 return mimesplit(stream, cur)
168 elif line.startswith('--- ') and inheader:
169 elif line.startswith('--- ') and inheader:
169 # No evil headers seen by diff start, split by hand
170 # No evil headers seen by diff start, split by hand
170 return headersplit(stream, cur)
171 return headersplit(stream, cur)
171 # Not enough info, keep reading
172 # Not enough info, keep reading
172
173
173 # if we are here, we have a very plain patch
174 # if we are here, we have a very plain patch
174 return remainder(cur)
175 return remainder(cur)
175
176
176 ## Some facility for extensible patch parsing:
177 ## Some facility for extensible patch parsing:
177 # list of pairs ("header to match", "data key")
178 # list of pairs ("header to match", "data key")
178 patchheadermap = [('Date', 'date'),
179 patchheadermap = [('Date', 'date'),
179 ('Branch', 'branch'),
180 ('Branch', 'branch'),
180 ('Node ID', 'nodeid'),
181 ('Node ID', 'nodeid'),
181 ]
182 ]
182
183
183 def extract(ui, fileobj):
184 def extract(ui, fileobj):
184 '''extract patch from data read from fileobj.
185 '''extract patch from data read from fileobj.
185
186
186 patch can be a normal patch or contained in an email message.
187 patch can be a normal patch or contained in an email message.
187
188
188 return a dictionary. Standard keys are:
189 return a dictionary. Standard keys are:
189 - filename,
190 - filename,
190 - message,
191 - message,
191 - user,
192 - user,
192 - date,
193 - date,
193 - branch,
194 - branch,
194 - node,
195 - node,
195 - p1,
196 - p1,
196 - p2.
197 - p2.
197 Any item can be missing from the dictionary. If filename is missing,
198 Any item can be missing from the dictionary. If filename is missing,
198 fileobj did not contain a patch. Caller must unlink filename when done.'''
199 fileobj did not contain a patch. Caller must unlink filename when done.'''
199
200
200 # attempt to detect the start of a patch
201 # attempt to detect the start of a patch
201 # (this heuristic is borrowed from quilt)
202 # (this heuristic is borrowed from quilt)
202 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
203 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
203 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
204 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
204 r'---[ \t].*?^\+\+\+[ \t]|'
205 r'---[ \t].*?^\+\+\+[ \t]|'
205 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
206 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
206
207
207 data = {}
208 data = {}
208 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
209 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
209 tmpfp = os.fdopen(fd, 'w')
210 tmpfp = os.fdopen(fd, 'w')
210 try:
211 try:
211 msg = email.Parser.Parser().parse(fileobj)
212 msg = email.Parser.Parser().parse(fileobj)
212
213
213 subject = msg['Subject']
214 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
214 data['user'] = msg['From']
215 data['user'] = msg['From'] and mail.headdecode(msg['From'])
215 if not subject and not data['user']:
216 if not subject and not data['user']:
216 # Not an email, restore parsed headers if any
217 # Not an email, restore parsed headers if any
217 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
218 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
218
219
219 # should try to parse msg['Date']
220 # should try to parse msg['Date']
220 parents = []
221 parents = []
221
222
222 if subject:
223 if subject:
223 if subject.startswith('[PATCH'):
224 if subject.startswith('[PATCH'):
224 pend = subject.find(']')
225 pend = subject.find(']')
225 if pend >= 0:
226 if pend >= 0:
226 subject = subject[pend + 1:].lstrip()
227 subject = subject[pend + 1:].lstrip()
227 subject = re.sub(r'\n[ \t]+', ' ', subject)
228 subject = re.sub(r'\n[ \t]+', ' ', subject)
228 ui.debug('Subject: %s\n' % subject)
229 ui.debug('Subject: %s\n' % subject)
229 if data['user']:
230 if data['user']:
230 ui.debug('From: %s\n' % data['user'])
231 ui.debug('From: %s\n' % data['user'])
231 diffs_seen = 0
232 diffs_seen = 0
232 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
233 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
233 message = ''
234 message = ''
234 for part in msg.walk():
235 for part in msg.walk():
235 content_type = part.get_content_type()
236 content_type = part.get_content_type()
236 ui.debug('Content-Type: %s\n' % content_type)
237 ui.debug('Content-Type: %s\n' % content_type)
237 if content_type not in ok_types:
238 if content_type not in ok_types:
238 continue
239 continue
239 payload = part.get_payload(decode=True)
240 payload = part.get_payload(decode=True)
240 m = diffre.search(payload)
241 m = diffre.search(payload)
241 if m:
242 if m:
242 hgpatch = False
243 hgpatch = False
243 hgpatchheader = False
244 hgpatchheader = False
244 ignoretext = False
245 ignoretext = False
245
246
246 ui.debug('found patch at byte %d\n' % m.start(0))
247 ui.debug('found patch at byte %d\n' % m.start(0))
247 diffs_seen += 1
248 diffs_seen += 1
248 cfp = cStringIO.StringIO()
249 cfp = cStringIO.StringIO()
249 for line in payload[:m.start(0)].splitlines():
250 for line in payload[:m.start(0)].splitlines():
250 if line.startswith('# HG changeset patch') and not hgpatch:
251 if line.startswith('# HG changeset patch') and not hgpatch:
251 ui.debug('patch generated by hg export\n')
252 ui.debug('patch generated by hg export\n')
252 hgpatch = True
253 hgpatch = True
253 hgpatchheader = True
254 hgpatchheader = True
254 # drop earlier commit message content
255 # drop earlier commit message content
255 cfp.seek(0)
256 cfp.seek(0)
256 cfp.truncate()
257 cfp.truncate()
257 subject = None
258 subject = None
258 elif hgpatchheader:
259 elif hgpatchheader:
259 if line.startswith('# User '):
260 if line.startswith('# User '):
260 data['user'] = line[7:]
261 data['user'] = line[7:]
261 ui.debug('From: %s\n' % data['user'])
262 ui.debug('From: %s\n' % data['user'])
262 elif line.startswith("# Parent "):
263 elif line.startswith("# Parent "):
263 parents.append(line[9:].lstrip())
264 parents.append(line[9:].lstrip())
264 elif line.startswith("# "):
265 elif line.startswith("# "):
265 for header, key in patchheadermap:
266 for header, key in patchheadermap:
266 prefix = '# %s ' % header
267 prefix = '# %s ' % header
267 if line.startswith(prefix):
268 if line.startswith(prefix):
268 data[key] = line[len(prefix):]
269 data[key] = line[len(prefix):]
269 else:
270 else:
270 hgpatchheader = False
271 hgpatchheader = False
271 elif line == '---':
272 elif line == '---':
272 ignoretext = True
273 ignoretext = True
273 if not hgpatchheader and not ignoretext:
274 if not hgpatchheader and not ignoretext:
274 cfp.write(line)
275 cfp.write(line)
275 cfp.write('\n')
276 cfp.write('\n')
276 message = cfp.getvalue()
277 message = cfp.getvalue()
277 if tmpfp:
278 if tmpfp:
278 tmpfp.write(payload)
279 tmpfp.write(payload)
279 if not payload.endswith('\n'):
280 if not payload.endswith('\n'):
280 tmpfp.write('\n')
281 tmpfp.write('\n')
281 elif not diffs_seen and message and content_type == 'text/plain':
282 elif not diffs_seen and message and content_type == 'text/plain':
282 message += '\n' + payload
283 message += '\n' + payload
283 except: # re-raises
284 except: # re-raises
284 tmpfp.close()
285 tmpfp.close()
285 os.unlink(tmpname)
286 os.unlink(tmpname)
286 raise
287 raise
287
288
288 if subject and not message.startswith(subject):
289 if subject and not message.startswith(subject):
289 message = '%s\n%s' % (subject, message)
290 message = '%s\n%s' % (subject, message)
290 data['message'] = message
291 data['message'] = message
291 tmpfp.close()
292 tmpfp.close()
292 if parents:
293 if parents:
293 data['p1'] = parents.pop(0)
294 data['p1'] = parents.pop(0)
294 if parents:
295 if parents:
295 data['p2'] = parents.pop(0)
296 data['p2'] = parents.pop(0)
296
297
297 if diffs_seen:
298 if diffs_seen:
298 data['filename'] = tmpname
299 data['filename'] = tmpname
299 else:
300 else:
300 os.unlink(tmpname)
301 os.unlink(tmpname)
301 return data
302 return data
302
303
303 class patchmeta(object):
304 class patchmeta(object):
304 """Patched file metadata
305 """Patched file metadata
305
306
306 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
307 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
307 or COPY. 'path' is patched file path. 'oldpath' is set to the
308 or COPY. 'path' is patched file path. 'oldpath' is set to the
308 origin file when 'op' is either COPY or RENAME, None otherwise. If
309 origin file when 'op' is either COPY or RENAME, None otherwise. If
309 file mode is changed, 'mode' is a tuple (islink, isexec) where
310 file mode is changed, 'mode' is a tuple (islink, isexec) where
310 'islink' is True if the file is a symlink and 'isexec' is True if
311 'islink' is True if the file is a symlink and 'isexec' is True if
311 the file is executable. Otherwise, 'mode' is None.
312 the file is executable. Otherwise, 'mode' is None.
312 """
313 """
313 def __init__(self, path):
314 def __init__(self, path):
314 self.path = path
315 self.path = path
315 self.oldpath = None
316 self.oldpath = None
316 self.mode = None
317 self.mode = None
317 self.op = 'MODIFY'
318 self.op = 'MODIFY'
318 self.binary = False
319 self.binary = False
319
320
320 def setmode(self, mode):
321 def setmode(self, mode):
321 islink = mode & 0o20000
322 islink = mode & 0o20000
322 isexec = mode & 0o100
323 isexec = mode & 0o100
323 self.mode = (islink, isexec)
324 self.mode = (islink, isexec)
324
325
325 def copy(self):
326 def copy(self):
326 other = patchmeta(self.path)
327 other = patchmeta(self.path)
327 other.oldpath = self.oldpath
328 other.oldpath = self.oldpath
328 other.mode = self.mode
329 other.mode = self.mode
329 other.op = self.op
330 other.op = self.op
330 other.binary = self.binary
331 other.binary = self.binary
331 return other
332 return other
332
333
333 def _ispatchinga(self, afile):
334 def _ispatchinga(self, afile):
334 if afile == '/dev/null':
335 if afile == '/dev/null':
335 return self.op == 'ADD'
336 return self.op == 'ADD'
336 return afile == 'a/' + (self.oldpath or self.path)
337 return afile == 'a/' + (self.oldpath or self.path)
337
338
338 def _ispatchingb(self, bfile):
339 def _ispatchingb(self, bfile):
339 if bfile == '/dev/null':
340 if bfile == '/dev/null':
340 return self.op == 'DELETE'
341 return self.op == 'DELETE'
341 return bfile == 'b/' + self.path
342 return bfile == 'b/' + self.path
342
343
343 def ispatching(self, afile, bfile):
344 def ispatching(self, afile, bfile):
344 return self._ispatchinga(afile) and self._ispatchingb(bfile)
345 return self._ispatchinga(afile) and self._ispatchingb(bfile)
345
346
346 def __repr__(self):
347 def __repr__(self):
347 return "<patchmeta %s %r>" % (self.op, self.path)
348 return "<patchmeta %s %r>" % (self.op, self.path)
348
349
349 def readgitpatch(lr):
350 def readgitpatch(lr):
350 """extract git-style metadata about patches from <patchname>"""
351 """extract git-style metadata about patches from <patchname>"""
351
352
352 # Filter patch for git information
353 # Filter patch for git information
353 gp = None
354 gp = None
354 gitpatches = []
355 gitpatches = []
355 for line in lr:
356 for line in lr:
356 line = line.rstrip(' \r\n')
357 line = line.rstrip(' \r\n')
357 if line.startswith('diff --git a/'):
358 if line.startswith('diff --git a/'):
358 m = gitre.match(line)
359 m = gitre.match(line)
359 if m:
360 if m:
360 if gp:
361 if gp:
361 gitpatches.append(gp)
362 gitpatches.append(gp)
362 dst = m.group(2)
363 dst = m.group(2)
363 gp = patchmeta(dst)
364 gp = patchmeta(dst)
364 elif gp:
365 elif gp:
365 if line.startswith('--- '):
366 if line.startswith('--- '):
366 gitpatches.append(gp)
367 gitpatches.append(gp)
367 gp = None
368 gp = None
368 continue
369 continue
369 if line.startswith('rename from '):
370 if line.startswith('rename from '):
370 gp.op = 'RENAME'
371 gp.op = 'RENAME'
371 gp.oldpath = line[12:]
372 gp.oldpath = line[12:]
372 elif line.startswith('rename to '):
373 elif line.startswith('rename to '):
373 gp.path = line[10:]
374 gp.path = line[10:]
374 elif line.startswith('copy from '):
375 elif line.startswith('copy from '):
375 gp.op = 'COPY'
376 gp.op = 'COPY'
376 gp.oldpath = line[10:]
377 gp.oldpath = line[10:]
377 elif line.startswith('copy to '):
378 elif line.startswith('copy to '):
378 gp.path = line[8:]
379 gp.path = line[8:]
379 elif line.startswith('deleted file'):
380 elif line.startswith('deleted file'):
380 gp.op = 'DELETE'
381 gp.op = 'DELETE'
381 elif line.startswith('new file mode '):
382 elif line.startswith('new file mode '):
382 gp.op = 'ADD'
383 gp.op = 'ADD'
383 gp.setmode(int(line[-6:], 8))
384 gp.setmode(int(line[-6:], 8))
384 elif line.startswith('new mode '):
385 elif line.startswith('new mode '):
385 gp.setmode(int(line[-6:], 8))
386 gp.setmode(int(line[-6:], 8))
386 elif line.startswith('GIT binary patch'):
387 elif line.startswith('GIT binary patch'):
387 gp.binary = True
388 gp.binary = True
388 if gp:
389 if gp:
389 gitpatches.append(gp)
390 gitpatches.append(gp)
390
391
391 return gitpatches
392 return gitpatches
392
393
393 class linereader(object):
394 class linereader(object):
394 # simple class to allow pushing lines back into the input stream
395 # simple class to allow pushing lines back into the input stream
395 def __init__(self, fp):
396 def __init__(self, fp):
396 self.fp = fp
397 self.fp = fp
397 self.buf = []
398 self.buf = []
398
399
399 def push(self, line):
400 def push(self, line):
400 if line is not None:
401 if line is not None:
401 self.buf.append(line)
402 self.buf.append(line)
402
403
403 def readline(self):
404 def readline(self):
404 if self.buf:
405 if self.buf:
405 l = self.buf[0]
406 l = self.buf[0]
406 del self.buf[0]
407 del self.buf[0]
407 return l
408 return l
408 return self.fp.readline()
409 return self.fp.readline()
409
410
410 def __iter__(self):
411 def __iter__(self):
411 while True:
412 while True:
412 l = self.readline()
413 l = self.readline()
413 if not l:
414 if not l:
414 break
415 break
415 yield l
416 yield l
416
417
417 class abstractbackend(object):
418 class abstractbackend(object):
418 def __init__(self, ui):
419 def __init__(self, ui):
419 self.ui = ui
420 self.ui = ui
420
421
421 def getfile(self, fname):
422 def getfile(self, fname):
422 """Return target file data and flags as a (data, (islink,
423 """Return target file data and flags as a (data, (islink,
423 isexec)) tuple. Data is None if file is missing/deleted.
424 isexec)) tuple. Data is None if file is missing/deleted.
424 """
425 """
425 raise NotImplementedError
426 raise NotImplementedError
426
427
427 def setfile(self, fname, data, mode, copysource):
428 def setfile(self, fname, data, mode, copysource):
428 """Write data to target file fname and set its mode. mode is a
429 """Write data to target file fname and set its mode. mode is a
429 (islink, isexec) tuple. If data is None, the file content should
430 (islink, isexec) tuple. If data is None, the file content should
430 be left unchanged. If the file is modified after being copied,
431 be left unchanged. If the file is modified after being copied,
431 copysource is set to the original file name.
432 copysource is set to the original file name.
432 """
433 """
433 raise NotImplementedError
434 raise NotImplementedError
434
435
435 def unlink(self, fname):
436 def unlink(self, fname):
436 """Unlink target file."""
437 """Unlink target file."""
437 raise NotImplementedError
438 raise NotImplementedError
438
439
439 def writerej(self, fname, failed, total, lines):
440 def writerej(self, fname, failed, total, lines):
440 """Write rejected lines for fname. total is the number of hunks
441 """Write rejected lines for fname. total is the number of hunks
441 which failed to apply and total the total number of hunks for this
442 which failed to apply and total the total number of hunks for this
442 files.
443 files.
443 """
444 """
444 pass
445 pass
445
446
446 def exists(self, fname):
447 def exists(self, fname):
447 raise NotImplementedError
448 raise NotImplementedError
448
449
449 class fsbackend(abstractbackend):
450 class fsbackend(abstractbackend):
450 def __init__(self, ui, basedir):
451 def __init__(self, ui, basedir):
451 super(fsbackend, self).__init__(ui)
452 super(fsbackend, self).__init__(ui)
452 self.opener = scmutil.opener(basedir)
453 self.opener = scmutil.opener(basedir)
453
454
454 def _join(self, f):
455 def _join(self, f):
455 return os.path.join(self.opener.base, f)
456 return os.path.join(self.opener.base, f)
456
457
457 def getfile(self, fname):
458 def getfile(self, fname):
458 if self.opener.islink(fname):
459 if self.opener.islink(fname):
459 return (self.opener.readlink(fname), (True, False))
460 return (self.opener.readlink(fname), (True, False))
460
461
461 isexec = False
462 isexec = False
462 try:
463 try:
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
464 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
464 except OSError as e:
465 except OSError as e:
465 if e.errno != errno.ENOENT:
466 if e.errno != errno.ENOENT:
466 raise
467 raise
467 try:
468 try:
468 return (self.opener.read(fname), (False, isexec))
469 return (self.opener.read(fname), (False, isexec))
469 except IOError as e:
470 except IOError as e:
470 if e.errno != errno.ENOENT:
471 if e.errno != errno.ENOENT:
471 raise
472 raise
472 return None, None
473 return None, None
473
474
474 def setfile(self, fname, data, mode, copysource):
475 def setfile(self, fname, data, mode, copysource):
475 islink, isexec = mode
476 islink, isexec = mode
476 if data is None:
477 if data is None:
477 self.opener.setflags(fname, islink, isexec)
478 self.opener.setflags(fname, islink, isexec)
478 return
479 return
479 if islink:
480 if islink:
480 self.opener.symlink(data, fname)
481 self.opener.symlink(data, fname)
481 else:
482 else:
482 self.opener.write(fname, data)
483 self.opener.write(fname, data)
483 if isexec:
484 if isexec:
484 self.opener.setflags(fname, False, True)
485 self.opener.setflags(fname, False, True)
485
486
486 def unlink(self, fname):
487 def unlink(self, fname):
487 self.opener.unlinkpath(fname, ignoremissing=True)
488 self.opener.unlinkpath(fname, ignoremissing=True)
488
489
489 def writerej(self, fname, failed, total, lines):
490 def writerej(self, fname, failed, total, lines):
490 fname = fname + ".rej"
491 fname = fname + ".rej"
491 self.ui.warn(
492 self.ui.warn(
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
493 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
493 (failed, total, fname))
494 (failed, total, fname))
494 fp = self.opener(fname, 'w')
495 fp = self.opener(fname, 'w')
495 fp.writelines(lines)
496 fp.writelines(lines)
496 fp.close()
497 fp.close()
497
498
498 def exists(self, fname):
499 def exists(self, fname):
499 return self.opener.lexists(fname)
500 return self.opener.lexists(fname)
500
501
501 class workingbackend(fsbackend):
502 class workingbackend(fsbackend):
502 def __init__(self, ui, repo, similarity):
503 def __init__(self, ui, repo, similarity):
503 super(workingbackend, self).__init__(ui, repo.root)
504 super(workingbackend, self).__init__(ui, repo.root)
504 self.repo = repo
505 self.repo = repo
505 self.similarity = similarity
506 self.similarity = similarity
506 self.removed = set()
507 self.removed = set()
507 self.changed = set()
508 self.changed = set()
508 self.copied = []
509 self.copied = []
509
510
510 def _checkknown(self, fname):
511 def _checkknown(self, fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
512 if self.repo.dirstate[fname] == '?' and self.exists(fname):
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
513 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
513
514
514 def setfile(self, fname, data, mode, copysource):
515 def setfile(self, fname, data, mode, copysource):
515 self._checkknown(fname)
516 self._checkknown(fname)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
517 super(workingbackend, self).setfile(fname, data, mode, copysource)
517 if copysource is not None:
518 if copysource is not None:
518 self.copied.append((copysource, fname))
519 self.copied.append((copysource, fname))
519 self.changed.add(fname)
520 self.changed.add(fname)
520
521
521 def unlink(self, fname):
522 def unlink(self, fname):
522 self._checkknown(fname)
523 self._checkknown(fname)
523 super(workingbackend, self).unlink(fname)
524 super(workingbackend, self).unlink(fname)
524 self.removed.add(fname)
525 self.removed.add(fname)
525 self.changed.add(fname)
526 self.changed.add(fname)
526
527
527 def close(self):
528 def close(self):
528 wctx = self.repo[None]
529 wctx = self.repo[None]
529 changed = set(self.changed)
530 changed = set(self.changed)
530 for src, dst in self.copied:
531 for src, dst in self.copied:
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
532 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
532 if self.removed:
533 if self.removed:
533 wctx.forget(sorted(self.removed))
534 wctx.forget(sorted(self.removed))
534 for f in self.removed:
535 for f in self.removed:
535 if f not in self.repo.dirstate:
536 if f not in self.repo.dirstate:
536 # File was deleted and no longer belongs to the
537 # File was deleted and no longer belongs to the
537 # dirstate, it was probably marked added then
538 # dirstate, it was probably marked added then
538 # deleted, and should not be considered by
539 # deleted, and should not be considered by
539 # marktouched().
540 # marktouched().
540 changed.discard(f)
541 changed.discard(f)
541 if changed:
542 if changed:
542 scmutil.marktouched(self.repo, changed, self.similarity)
543 scmutil.marktouched(self.repo, changed, self.similarity)
543 return sorted(self.changed)
544 return sorted(self.changed)
544
545
545 class filestore(object):
546 class filestore(object):
546 def __init__(self, maxsize=None):
547 def __init__(self, maxsize=None):
547 self.opener = None
548 self.opener = None
548 self.files = {}
549 self.files = {}
549 self.created = 0
550 self.created = 0
550 self.maxsize = maxsize
551 self.maxsize = maxsize
551 if self.maxsize is None:
552 if self.maxsize is None:
552 self.maxsize = 4*(2**20)
553 self.maxsize = 4*(2**20)
553 self.size = 0
554 self.size = 0
554 self.data = {}
555 self.data = {}
555
556
556 def setfile(self, fname, data, mode, copied=None):
557 def setfile(self, fname, data, mode, copied=None):
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
558 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
558 self.data[fname] = (data, mode, copied)
559 self.data[fname] = (data, mode, copied)
559 self.size += len(data)
560 self.size += len(data)
560 else:
561 else:
561 if self.opener is None:
562 if self.opener is None:
562 root = tempfile.mkdtemp(prefix='hg-patch-')
563 root = tempfile.mkdtemp(prefix='hg-patch-')
563 self.opener = scmutil.opener(root)
564 self.opener = scmutil.opener(root)
564 # Avoid filename issues with these simple names
565 # Avoid filename issues with these simple names
565 fn = str(self.created)
566 fn = str(self.created)
566 self.opener.write(fn, data)
567 self.opener.write(fn, data)
567 self.created += 1
568 self.created += 1
568 self.files[fname] = (fn, mode, copied)
569 self.files[fname] = (fn, mode, copied)
569
570
570 def getfile(self, fname):
571 def getfile(self, fname):
571 if fname in self.data:
572 if fname in self.data:
572 return self.data[fname]
573 return self.data[fname]
573 if not self.opener or fname not in self.files:
574 if not self.opener or fname not in self.files:
574 return None, None, None
575 return None, None, None
575 fn, mode, copied = self.files[fname]
576 fn, mode, copied = self.files[fname]
576 return self.opener.read(fn), mode, copied
577 return self.opener.read(fn), mode, copied
577
578
578 def close(self):
579 def close(self):
579 if self.opener:
580 if self.opener:
580 shutil.rmtree(self.opener.base)
581 shutil.rmtree(self.opener.base)
581
582
582 class repobackend(abstractbackend):
583 class repobackend(abstractbackend):
583 def __init__(self, ui, repo, ctx, store):
584 def __init__(self, ui, repo, ctx, store):
584 super(repobackend, self).__init__(ui)
585 super(repobackend, self).__init__(ui)
585 self.repo = repo
586 self.repo = repo
586 self.ctx = ctx
587 self.ctx = ctx
587 self.store = store
588 self.store = store
588 self.changed = set()
589 self.changed = set()
589 self.removed = set()
590 self.removed = set()
590 self.copied = {}
591 self.copied = {}
591
592
592 def _checkknown(self, fname):
593 def _checkknown(self, fname):
593 if fname not in self.ctx:
594 if fname not in self.ctx:
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
595 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
595
596
596 def getfile(self, fname):
597 def getfile(self, fname):
597 try:
598 try:
598 fctx = self.ctx[fname]
599 fctx = self.ctx[fname]
599 except error.LookupError:
600 except error.LookupError:
600 return None, None
601 return None, None
601 flags = fctx.flags()
602 flags = fctx.flags()
602 return fctx.data(), ('l' in flags, 'x' in flags)
603 return fctx.data(), ('l' in flags, 'x' in flags)
603
604
604 def setfile(self, fname, data, mode, copysource):
605 def setfile(self, fname, data, mode, copysource):
605 if copysource:
606 if copysource:
606 self._checkknown(copysource)
607 self._checkknown(copysource)
607 if data is None:
608 if data is None:
608 data = self.ctx[fname].data()
609 data = self.ctx[fname].data()
609 self.store.setfile(fname, data, mode, copysource)
610 self.store.setfile(fname, data, mode, copysource)
610 self.changed.add(fname)
611 self.changed.add(fname)
611 if copysource:
612 if copysource:
612 self.copied[fname] = copysource
613 self.copied[fname] = copysource
613
614
614 def unlink(self, fname):
615 def unlink(self, fname):
615 self._checkknown(fname)
616 self._checkknown(fname)
616 self.removed.add(fname)
617 self.removed.add(fname)
617
618
618 def exists(self, fname):
619 def exists(self, fname):
619 return fname in self.ctx
620 return fname in self.ctx
620
621
621 def close(self):
622 def close(self):
622 return self.changed | self.removed
623 return self.changed | self.removed
623
624
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
625 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
626 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
627 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
628 eolmodes = ['strict', 'crlf', 'lf', 'auto']
628
629
629 class patchfile(object):
630 class patchfile(object):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 self.fname = gp.path
632 self.fname = gp.path
632 self.eolmode = eolmode
633 self.eolmode = eolmode
633 self.eol = None
634 self.eol = None
634 self.backend = backend
635 self.backend = backend
635 self.ui = ui
636 self.ui = ui
636 self.lines = []
637 self.lines = []
637 self.exists = False
638 self.exists = False
638 self.missing = True
639 self.missing = True
639 self.mode = gp.mode
640 self.mode = gp.mode
640 self.copysource = gp.oldpath
641 self.copysource = gp.oldpath
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 self.remove = gp.op == 'DELETE'
643 self.remove = gp.op == 'DELETE'
643 if self.copysource is None:
644 if self.copysource is None:
644 data, mode = backend.getfile(self.fname)
645 data, mode = backend.getfile(self.fname)
645 else:
646 else:
646 data, mode = store.getfile(self.copysource)[:2]
647 data, mode = store.getfile(self.copysource)[:2]
647 if data is not None:
648 if data is not None:
648 self.exists = self.copysource is None or backend.exists(self.fname)
649 self.exists = self.copysource is None or backend.exists(self.fname)
649 self.missing = False
650 self.missing = False
650 if data:
651 if data:
651 self.lines = mdiff.splitnewlines(data)
652 self.lines = mdiff.splitnewlines(data)
652 if self.mode is None:
653 if self.mode is None:
653 self.mode = mode
654 self.mode = mode
654 if self.lines:
655 if self.lines:
655 # Normalize line endings
656 # Normalize line endings
656 if self.lines[0].endswith('\r\n'):
657 if self.lines[0].endswith('\r\n'):
657 self.eol = '\r\n'
658 self.eol = '\r\n'
658 elif self.lines[0].endswith('\n'):
659 elif self.lines[0].endswith('\n'):
659 self.eol = '\n'
660 self.eol = '\n'
660 if eolmode != 'strict':
661 if eolmode != 'strict':
661 nlines = []
662 nlines = []
662 for l in self.lines:
663 for l in self.lines:
663 if l.endswith('\r\n'):
664 if l.endswith('\r\n'):
664 l = l[:-2] + '\n'
665 l = l[:-2] + '\n'
665 nlines.append(l)
666 nlines.append(l)
666 self.lines = nlines
667 self.lines = nlines
667 else:
668 else:
668 if self.create:
669 if self.create:
669 self.missing = False
670 self.missing = False
670 if self.mode is None:
671 if self.mode is None:
671 self.mode = (False, False)
672 self.mode = (False, False)
672 if self.missing:
673 if self.missing:
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674
675
675 self.hash = {}
676 self.hash = {}
676 self.dirty = 0
677 self.dirty = 0
677 self.offset = 0
678 self.offset = 0
678 self.skew = 0
679 self.skew = 0
679 self.rej = []
680 self.rej = []
680 self.fileprinted = False
681 self.fileprinted = False
681 self.printfile(False)
682 self.printfile(False)
682 self.hunks = 0
683 self.hunks = 0
683
684
684 def writelines(self, fname, lines, mode):
685 def writelines(self, fname, lines, mode):
685 if self.eolmode == 'auto':
686 if self.eolmode == 'auto':
686 eol = self.eol
687 eol = self.eol
687 elif self.eolmode == 'crlf':
688 elif self.eolmode == 'crlf':
688 eol = '\r\n'
689 eol = '\r\n'
689 else:
690 else:
690 eol = '\n'
691 eol = '\n'
691
692
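# lines are kept LF-normalized in memory (see __init__); convert them back
# to the target EOL only when the file is actually written out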
692 if self.eolmode != 'strict' and eol and eol != '\n':
693 if self.eolmode != 'strict' and eol and eol != '\n':
693 rawlines = []
694 rawlines = []
694 for l in lines:
695 for l in lines:
695 if l and l[-1] == '\n':
696 if l and l[-1] == '\n':
696 l = l[:-1] + eol
697 l = l[:-1] + eol
697 rawlines.append(l)
698 rawlines.append(l)
698 lines = rawlines
699 lines = rawlines
699
700
700 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
701 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
701
702
702 def printfile(self, warn):
703 def printfile(self, warn):
703 if self.fileprinted:
704 if self.fileprinted:
704 return
705 return
705 if warn or self.ui.verbose:
706 if warn or self.ui.verbose:
706 self.fileprinted = True
707 self.fileprinted = True
707 s = _("patching file %s\n") % self.fname
708 s = _("patching file %s\n") % self.fname
708 if warn:
709 if warn:
709 self.ui.warn(s)
710 self.ui.warn(s)
710 else:
711 else:
711 self.ui.note(s)
712 self.ui.note(s)
712
713
713
714
714 def findlines(self, l, linenum):
715 def findlines(self, l, linenum):
715 # looks through the hash and finds candidate lines. The
716 # looks through the hash and finds candidate lines. The
716 # result is a list of line numbers sorted based on distance
717 # result is a list of line numbers sorted based on distance
717 # from linenum
718 # from linenum
718
719
719 cand = self.hash.get(l, [])
720 cand = self.hash.get(l, [])
720 if len(cand) > 1:
721 if len(cand) > 1:
721 # re-sort our list of candidate lines by distance from linenum
722 # re-sort our list of candidate lines by distance from linenum
722 cand.sort(key=lambda x: abs(x - linenum))
723 cand.sort(key=lambda x: abs(x - linenum))
723 return cand
724 return cand
724
725
725 def write_rej(self):
726 def write_rej(self):
726 # our rejects are a little different from patch(1). This always
727 # our rejects are a little different from patch(1). This always
727 # creates rejects in the same form as the original patch. A file
728 # creates rejects in the same form as the original patch. A file
728 # header is inserted so that you can run the reject through patch again
729 # header is inserted so that you can run the reject through patch again
729 # without having to type the filename.
730 # without having to type the filename.
730 if not self.rej:
731 if not self.rej:
731 return
732 return
732 base = os.path.basename(self.fname)
733 base = os.path.basename(self.fname)
733 lines = ["--- %s\n+++ %s\n" % (base, base)]
734 lines = ["--- %s\n+++ %s\n" % (base, base)]
734 for x in self.rej:
735 for x in self.rej:
735 for l in x.hunk:
736 for l in x.hunk:
736 lines.append(l)
737 lines.append(l)
737 if l[-1] != '\n':
738 if l[-1] != '\n':
738 lines.append("\n\ No newline at end of file\n")
739 lines.append("\n\ No newline at end of file\n")
739 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
740 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
740
741
741 def apply(self, h):
742 def apply(self, h):
742 if not h.complete():
743 if not h.complete():
743 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
744 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
744 (h.number, h.desc, len(h.a), h.lena, len(h.b),
745 (h.number, h.desc, len(h.a), h.lena, len(h.b),
745 h.lenb))
746 h.lenb))
746
747
747 self.hunks += 1
748 self.hunks += 1
748
749
749 if self.missing:
750 if self.missing:
750 self.rej.append(h)
751 self.rej.append(h)
751 return -1
752 return -1
752
753
753 if self.exists and self.create:
754 if self.exists and self.create:
754 if self.copysource:
755 if self.copysource:
755 self.ui.warn(_("cannot create %s: destination already "
756 self.ui.warn(_("cannot create %s: destination already "
756 "exists\n") % self.fname)
757 "exists\n") % self.fname)
757 else:
758 else:
758 self.ui.warn(_("file %s already exists\n") % self.fname)
759 self.ui.warn(_("file %s already exists\n") % self.fname)
759 self.rej.append(h)
760 self.rej.append(h)
760 return -1
761 return -1
761
762
762 if isinstance(h, binhunk):
763 if isinstance(h, binhunk):
763 if self.remove:
764 if self.remove:
764 self.backend.unlink(self.fname)
765 self.backend.unlink(self.fname)
765 else:
766 else:
766 l = h.new(self.lines)
767 l = h.new(self.lines)
767 self.lines[:] = l
768 self.lines[:] = l
768 self.offset += len(l)
769 self.offset += len(l)
769 self.dirty = True
770 self.dirty = True
770 return 0
771 return 0
771
772
772 horig = h
773 horig = h
773 if (self.eolmode in ('crlf', 'lf')
774 if (self.eolmode in ('crlf', 'lf')
774 or self.eolmode == 'auto' and self.eol):
775 or self.eolmode == 'auto' and self.eol):
775 # If new eols are going to be normalized, then normalize
776 # If new eols are going to be normalized, then normalize
776 # hunk data before patching. Otherwise, preserve input
777 # hunk data before patching. Otherwise, preserve input
777 # line-endings.
778 # line-endings.
778 h = h.getnormalized()
779 h = h.getnormalized()
779
780
780 # fast case first, no offsets, no fuzz
781 # fast case first, no offsets, no fuzz
781 old, oldstart, new, newstart = h.fuzzit(0, False)
782 old, oldstart, new, newstart = h.fuzzit(0, False)
782 oldstart += self.offset
783 oldstart += self.offset
783 orig_start = oldstart
784 orig_start = oldstart
784 # if there's skew we want to emit the "(offset %d lines)" even
785 # if there's skew we want to emit the "(offset %d lines)" even
785 # when the hunk cleanly applies at start + skew, so skip the
786 # when the hunk cleanly applies at start + skew, so skip the
786 # fast case code
787 # fast case code
787 if (self.skew == 0 and
788 if (self.skew == 0 and
788 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
789 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
789 if self.remove:
790 if self.remove:
790 self.backend.unlink(self.fname)
791 self.backend.unlink(self.fname)
791 else:
792 else:
792 self.lines[oldstart:oldstart + len(old)] = new
793 self.lines[oldstart:oldstart + len(old)] = new
793 self.offset += len(new) - len(old)
794 self.offset += len(new) - len(old)
794 self.dirty = True
795 self.dirty = True
795 return 0
796 return 0
796
797
797 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
798 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
798 self.hash = {}
799 self.hash = {}
799 for x, s in enumerate(self.lines):
800 for x, s in enumerate(self.lines):
800 self.hash.setdefault(s, []).append(x)
801 self.hash.setdefault(s, []).append(x)
801
802
802 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
803 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
803 for toponly in [True, False]:
804 for toponly in [True, False]:
804 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
805 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
805 oldstart = oldstart + self.offset + self.skew
806 oldstart = oldstart + self.offset + self.skew
806 oldstart = min(oldstart, len(self.lines))
807 oldstart = min(oldstart, len(self.lines))
807 if old:
808 if old:
808 cand = self.findlines(old[0][1:], oldstart)
809 cand = self.findlines(old[0][1:], oldstart)
809 else:
810 else:
810 # Only adding lines with no or fuzzed context, just
811 # Only adding lines with no or fuzzed context, just
811 # take the skew into account
812 # take the skew into account
812 cand = [oldstart]
813 cand = [oldstart]
813
814
814 for l in cand:
815 for l in cand:
815 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
816 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
816 self.lines[l : l + len(old)] = new
817 self.lines[l : l + len(old)] = new
817 self.offset += len(new) - len(old)
818 self.offset += len(new) - len(old)
818 self.skew = l - orig_start
819 self.skew = l - orig_start
819 self.dirty = True
820 self.dirty = True
820 offset = l - orig_start - fuzzlen
821 offset = l - orig_start - fuzzlen
821 if fuzzlen:
822 if fuzzlen:
822 msg = _("Hunk #%d succeeded at %d "
823 msg = _("Hunk #%d succeeded at %d "
823 "with fuzz %d "
824 "with fuzz %d "
824 "(offset %d lines).\n")
825 "(offset %d lines).\n")
825 self.printfile(True)
826 self.printfile(True)
826 self.ui.warn(msg %
827 self.ui.warn(msg %
827 (h.number, l + 1, fuzzlen, offset))
828 (h.number, l + 1, fuzzlen, offset))
828 else:
829 else:
829 msg = _("Hunk #%d succeeded at %d "
830 msg = _("Hunk #%d succeeded at %d "
830 "(offset %d lines).\n")
831 "(offset %d lines).\n")
831 self.ui.note(msg % (h.number, l + 1, offset))
832 self.ui.note(msg % (h.number, l + 1, offset))
832 return fuzzlen
833 return fuzzlen
833 self.printfile(True)
834 self.printfile(True)
834 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
835 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
835 self.rej.append(horig)
836 self.rej.append(horig)
836 return -1
837 return -1
837
838
838 def close(self):
839 def close(self):
839 if self.dirty:
840 if self.dirty:
840 self.writelines(self.fname, self.lines, self.mode)
841 self.writelines(self.fname, self.lines, self.mode)
841 self.write_rej()
842 self.write_rej()
842 return len(self.rej)
843 return len(self.rej)
843
844
844 class header(object):
845 class header(object):
845 """patch header
846 """patch header
846 """
847 """
847 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
848 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
848 diff_re = re.compile('diff -r .* (.*)$')
849 diff_re = re.compile('diff -r .* (.*)$')
849 allhunks_re = re.compile('(?:index|deleted file) ')
850 allhunks_re = re.compile('(?:index|deleted file) ')
850 pretty_re = re.compile('(?:new file|deleted file) ')
851 pretty_re = re.compile('(?:new file|deleted file) ')
851 special_re = re.compile('(?:index|deleted|copy|rename) ')
852 special_re = re.compile('(?:index|deleted|copy|rename) ')
852 newfile_re = re.compile('(?:new file)')
853 newfile_re = re.compile('(?:new file)')
853
854
854 def __init__(self, header):
855 def __init__(self, header):
855 self.header = header
856 self.header = header
856 self.hunks = []
857 self.hunks = []
857
858
858 def binary(self):
859 def binary(self):
859 return any(h.startswith('index ') for h in self.header)
860 return any(h.startswith('index ') for h in self.header)
860
861
861 def pretty(self, fp):
862 def pretty(self, fp):
862 for h in self.header:
863 for h in self.header:
863 if h.startswith('index '):
864 if h.startswith('index '):
864 fp.write(_('this modifies a binary file (all or nothing)\n'))
865 fp.write(_('this modifies a binary file (all or nothing)\n'))
865 break
866 break
866 if self.pretty_re.match(h):
867 if self.pretty_re.match(h):
867 fp.write(h)
868 fp.write(h)
868 if self.binary():
869 if self.binary():
869 fp.write(_('this is a binary file\n'))
870 fp.write(_('this is a binary file\n'))
870 break
871 break
871 if h.startswith('---'):
872 if h.startswith('---'):
872 fp.write(_('%d hunks, %d lines changed\n') %
873 fp.write(_('%d hunks, %d lines changed\n') %
873 (len(self.hunks),
874 (len(self.hunks),
874 sum([max(h.added, h.removed) for h in self.hunks])))
875 sum([max(h.added, h.removed) for h in self.hunks])))
875 break
876 break
876 fp.write(h)
877 fp.write(h)
877
878
878 def write(self, fp):
879 def write(self, fp):
879 fp.write(''.join(self.header))
880 fp.write(''.join(self.header))
880
881
881 def allhunks(self):
882 def allhunks(self):
882 return any(self.allhunks_re.match(h) for h in self.header)
883 return any(self.allhunks_re.match(h) for h in self.header)
883
884
884 def files(self):
885 def files(self):
885 match = self.diffgit_re.match(self.header[0])
886 match = self.diffgit_re.match(self.header[0])
886 if match:
887 if match:
887 fromfile, tofile = match.groups()
888 fromfile, tofile = match.groups()
888 if fromfile == tofile:
889 if fromfile == tofile:
889 return [fromfile]
890 return [fromfile]
890 return [fromfile, tofile]
891 return [fromfile, tofile]
891 else:
892 else:
892 return self.diff_re.match(self.header[0]).groups()
893 return self.diff_re.match(self.header[0]).groups()
893
894
894 def filename(self):
895 def filename(self):
895 return self.files()[-1]
896 return self.files()[-1]
896
897
897 def __repr__(self):
898 def __repr__(self):
898 return '<header %s>' % (' '.join(map(repr, self.files())))
899 return '<header %s>' % (' '.join(map(repr, self.files())))
899
900
900 def isnewfile(self):
901 def isnewfile(self):
901 return any(self.newfile_re.match(h) for h in self.header)
902 return any(self.newfile_re.match(h) for h in self.header)
902
903
903 def special(self):
904 def special(self):
904 # Special files are shown only at the header level and not at the hunk
905 # Special files are shown only at the header level and not at the hunk
905 # level. For example, a file that has been deleted is a special file.
906 # level. For example, a file that has been deleted is a special file.
906 # The user cannot change the content of the operation: in the case of
907 # The user cannot change the content of the operation: in the case of
907 # a deleted file he has to take the deletion or not take it; he
908 # a deleted file he has to take the deletion or not take it; he
908 # cannot take only some of it.
909 # cannot take only some of it.
909 # Newly added files are special if they are empty; they are not special
910 # Newly added files are special if they are empty; they are not special
910 # if they have some content, as we want to be able to change it
911 # if they have some content, as we want to be able to change it
911 nocontent = len(self.header) == 2
912 nocontent = len(self.header) == 2
912 emptynewfile = self.isnewfile() and nocontent
913 emptynewfile = self.isnewfile() and nocontent
913 return emptynewfile or \
914 return emptynewfile or \
914 any(self.special_re.match(h) for h in self.header)
915 any(self.special_re.match(h) for h in self.header)
915
916
916 class recordhunk(object):
917 class recordhunk(object):
917 """patch hunk
918 """patch hunk
918
919
919 XXX shouldn't we merge this with the other hunk class?
920 XXX shouldn't we merge this with the other hunk class?
920 """
921 """
921 maxcontext = 3
922 maxcontext = 3
922
923
923 def __init__(self, header, fromline, toline, proc, before, hunk, after):
924 def __init__(self, header, fromline, toline, proc, before, hunk, after):
924 def trimcontext(number, lines):
925 def trimcontext(number, lines):
925 delta = len(lines) - self.maxcontext
926 delta = len(lines) - self.maxcontext
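# note: the 'if False' below makes trimming a no-op, so the full context
# is currently kept regardless of maxcontext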
926 if False and delta > 0:
927 if False and delta > 0:
927 return number + delta, lines[:self.maxcontext]
928 return number + delta, lines[:self.maxcontext]
928 return number, lines
929 return number, lines
929
930
930 self.header = header
931 self.header = header
931 self.fromline, self.before = trimcontext(fromline, before)
932 self.fromline, self.before = trimcontext(fromline, before)
932 self.toline, self.after = trimcontext(toline, after)
933 self.toline, self.after = trimcontext(toline, after)
933 self.proc = proc
934 self.proc = proc
934 self.hunk = hunk
935 self.hunk = hunk
935 self.added, self.removed = self.countchanges(self.hunk)
936 self.added, self.removed = self.countchanges(self.hunk)
936
937
937 def __eq__(self, v):
938 def __eq__(self, v):
938 if not isinstance(v, recordhunk):
939 if not isinstance(v, recordhunk):
939 return False
940 return False
940
941
941 return ((v.hunk == self.hunk) and
942 return ((v.hunk == self.hunk) and
942 (v.proc == self.proc) and
943 (v.proc == self.proc) and
943 (self.fromline == v.fromline) and
944 (self.fromline == v.fromline) and
944 (self.header.files() == v.header.files()))
945 (self.header.files() == v.header.files()))
945
946
946 def __hash__(self):
947 def __hash__(self):
947 return hash((tuple(self.hunk),
948 return hash((tuple(self.hunk),
948 tuple(self.header.files()),
949 tuple(self.header.files()),
949 self.fromline,
950 self.fromline,
950 self.proc))
951 self.proc))
951
952
952 def countchanges(self, hunk):
953 def countchanges(self, hunk):
953 """hunk -> (n+,n-)"""
954 """hunk -> (n+,n-)"""
954 add = len([h for h in hunk if h[0] == '+'])
955 add = len([h for h in hunk if h[0] == '+'])
955 rem = len([h for h in hunk if h[0] == '-'])
956 rem = len([h for h in hunk if h[0] == '-'])
956 return add, rem
957 return add, rem
957
958
958 def write(self, fp):
959 def write(self, fp):
959 delta = len(self.before) + len(self.after)
960 delta = len(self.before) + len(self.after)
960 if self.after and self.after[-1] == '\\ No newline at end of file\n':
961 if self.after and self.after[-1] == '\\ No newline at end of file\n':
961 delta -= 1
962 delta -= 1
962 fromlen = delta + self.removed
963 fromlen = delta + self.removed
963 tolen = delta + self.added
964 tolen = delta + self.added
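# hunk lengths in the '@@' header are the shared context (before + after)
# plus the removed/added line counts respectively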
964 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
965 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
965 (self.fromline, fromlen, self.toline, tolen,
966 (self.fromline, fromlen, self.toline, tolen,
966 self.proc and (' ' + self.proc)))
967 self.proc and (' ' + self.proc)))
967 fp.write(''.join(self.before + self.hunk + self.after))
968 fp.write(''.join(self.before + self.hunk + self.after))
968
969
969 pretty = write
970 pretty = write
970
971
971 def filename(self):
972 def filename(self):
972 return self.header.filename()
973 return self.header.filename()
973
974
974 def __repr__(self):
975 def __repr__(self):
975 return '<hunk %r@%d>' % (self.filename(), self.fromline)
976 return '<hunk %r@%d>' % (self.filename(), self.fromline)
976
977
977 def filterpatch(ui, headers, operation=None):
978 def filterpatch(ui, headers, operation=None):
978 """Interactively filter patch chunks into applied-only chunks"""
979 """Interactively filter patch chunks into applied-only chunks"""
979 if operation is None:
980 if operation is None:
980 operation = _('record')
981 operation = _('record')
981
982
982 def prompt(skipfile, skipall, query, chunk):
983 def prompt(skipfile, skipall, query, chunk):
983 """prompt query, and process base inputs
984 """prompt query, and process base inputs
984
985
985 - y/n for the rest of file
986 - y/n for the rest of file
986 - y/n for the rest
987 - y/n for the rest
987 - ? (help)
988 - ? (help)
988 - q (quit)
989 - q (quit)
989
990
990 Return True/False and possibly updated skipfile and skipall.
991 Return True/False and possibly updated skipfile and skipall.
991 """
992 """
992 newpatches = None
993 newpatches = None
993 if skipall is not None:
994 if skipall is not None:
994 return skipall, skipfile, skipall, newpatches
995 return skipall, skipfile, skipall, newpatches
995 if skipfile is not None:
996 if skipfile is not None:
996 return skipfile, skipfile, skipall, newpatches
997 return skipfile, skipfile, skipall, newpatches
997 while True:
998 while True:
998 resps = _('[Ynesfdaq?]'
999 resps = _('[Ynesfdaq?]'
999 '$$ &Yes, record this change'
1000 '$$ &Yes, record this change'
1000 '$$ &No, skip this change'
1001 '$$ &No, skip this change'
1001 '$$ &Edit this change manually'
1002 '$$ &Edit this change manually'
1002 '$$ &Skip remaining changes to this file'
1003 '$$ &Skip remaining changes to this file'
1003 '$$ Record remaining changes to this &file'
1004 '$$ Record remaining changes to this &file'
1004 '$$ &Done, skip remaining changes and files'
1005 '$$ &Done, skip remaining changes and files'
1005 '$$ Record &all changes to all remaining files'
1006 '$$ Record &all changes to all remaining files'
1006 '$$ &Quit, recording no changes'
1007 '$$ &Quit, recording no changes'
1007 '$$ &? (display help)')
1008 '$$ &? (display help)')
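# ui.promptchoice splits this string on '$$'; the '&' in each choice marks
# the key that selects it, and the index of the chosen entry is returned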
1008 r = ui.promptchoice("%s %s" % (query, resps))
1009 r = ui.promptchoice("%s %s" % (query, resps))
1009 ui.write("\n")
1010 ui.write("\n")
1010 if r == 8: # ?
1011 if r == 8: # ?
1011 for c, t in ui.extractchoices(resps)[1]:
1012 for c, t in ui.extractchoices(resps)[1]:
1012 ui.write('%s - %s\n' % (c, t.lower()))
1013 ui.write('%s - %s\n' % (c, t.lower()))
1013 continue
1014 continue
1014 elif r == 0: # yes
1015 elif r == 0: # yes
1015 ret = True
1016 ret = True
1016 elif r == 1: # no
1017 elif r == 1: # no
1017 ret = False
1018 ret = False
1018 elif r == 2: # Edit patch
1019 elif r == 2: # Edit patch
1019 if chunk is None:
1020 if chunk is None:
1020 ui.write(_('cannot edit patch for whole file'))
1021 ui.write(_('cannot edit patch for whole file'))
1021 ui.write("\n")
1022 ui.write("\n")
1022 continue
1023 continue
1023 if chunk.header.binary():
1024 if chunk.header.binary():
1024 ui.write(_('cannot edit patch for binary file'))
1025 ui.write(_('cannot edit patch for binary file'))
1025 ui.write("\n")
1026 ui.write("\n")
1026 continue
1027 continue
1027 # Patch comment based on the Git one (based on comment at end of
1028 # Patch comment based on the Git one (based on comment at end of
1028 # https://mercurial-scm.org/wiki/RecordExtension)
1029 # https://mercurial-scm.org/wiki/RecordExtension)
1029 phelp = '---' + _("""
1030 phelp = '---' + _("""
1030 To remove '-' lines, make them ' ' lines (context).
1031 To remove '-' lines, make them ' ' lines (context).
1031 To remove '+' lines, delete them.
1032 To remove '+' lines, delete them.
1032 Lines starting with # will be removed from the patch.
1033 Lines starting with # will be removed from the patch.
1033
1034
1034 If the patch applies cleanly, the edited hunk will immediately be
1035 If the patch applies cleanly, the edited hunk will immediately be
1035 added to the record list. If it does not apply cleanly, a rejects
1036 added to the record list. If it does not apply cleanly, a rejects
1036 file will be generated: you can use that when you try again. If
1037 file will be generated: you can use that when you try again. If
1037 all lines of the hunk are removed, then the edit is aborted and
1038 all lines of the hunk are removed, then the edit is aborted and
1038 the hunk is left unchanged.
1039 the hunk is left unchanged.
1039 """)
1040 """)
1040 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1041 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1041 suffix=".diff", text=True)
1042 suffix=".diff", text=True)
1042 ncpatchfp = None
1043 ncpatchfp = None
1043 try:
1044 try:
1044 # Write the initial patch
1045 # Write the initial patch
1045 f = os.fdopen(patchfd, "w")
1046 f = os.fdopen(patchfd, "w")
1046 chunk.header.write(f)
1047 chunk.header.write(f)
1047 chunk.write(f)
1048 chunk.write(f)
1048 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1049 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1049 f.close()
1050 f.close()
1050 # Start the editor and wait for it to complete
1051 # Start the editor and wait for it to complete
1051 editor = ui.geteditor()
1052 editor = ui.geteditor()
1052 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1053 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1053 environ={'HGUSER': ui.username()})
1054 environ={'HGUSER': ui.username()})
1054 if ret != 0:
1055 if ret != 0:
1055 ui.warn(_("editor exited with exit code %d\n") % ret)
1056 ui.warn(_("editor exited with exit code %d\n") % ret)
1056 continue
1057 continue
1057 # Remove comment lines
1058 # Remove comment lines
1058 patchfp = open(patchfn)
1059 patchfp = open(patchfn)
1059 ncpatchfp = cStringIO.StringIO()
1060 ncpatchfp = cStringIO.StringIO()
1060 for line in patchfp:
1061 for line in patchfp:
1061 if not line.startswith('#'):
1062 if not line.startswith('#'):
1062 ncpatchfp.write(line)
1063 ncpatchfp.write(line)
1063 patchfp.close()
1064 patchfp.close()
1064 ncpatchfp.seek(0)
1065 ncpatchfp.seek(0)
1065 newpatches = parsepatch(ncpatchfp)
1066 newpatches = parsepatch(ncpatchfp)
1066 finally:
1067 finally:
1067 os.unlink(patchfn)
1068 os.unlink(patchfn)
1068 del ncpatchfp
1069 del ncpatchfp
1069 # Signal that the chunk shouldn't be applied as-is, but
1070 # Signal that the chunk shouldn't be applied as-is, but
1070 # provide the new patch to be used instead.
1071 # provide the new patch to be used instead.
1071 ret = False
1072 ret = False
1072 elif r == 3: # Skip
1073 elif r == 3: # Skip
1073 ret = skipfile = False
1074 ret = skipfile = False
1074 elif r == 4: # file (Record remaining)
1075 elif r == 4: # file (Record remaining)
1075 ret = skipfile = True
1076 ret = skipfile = True
1076 elif r == 5: # done, skip remaining
1077 elif r == 5: # done, skip remaining
1077 ret = skipall = False
1078 ret = skipall = False
1078 elif r == 6: # all
1079 elif r == 6: # all
1079 ret = skipall = True
1080 ret = skipall = True
1080 elif r == 7: # quit
1081 elif r == 7: # quit
1081 raise error.Abort(_('user quit'))
1082 raise error.Abort(_('user quit'))
1082 return ret, skipfile, skipall, newpatches
1083 return ret, skipfile, skipall, newpatches
1083
1084
1084 seen = set()
1085 seen = set()
1085 applied = {} # 'filename' -> [] of chunks
1086 applied = {} # 'filename' -> [] of chunks
1086 skipfile, skipall = None, None
1087 skipfile, skipall = None, None
1087 pos, total = 1, sum(len(h.hunks) for h in headers)
1088 pos, total = 1, sum(len(h.hunks) for h in headers)
1088 for h in headers:
1089 for h in headers:
1089 pos += len(h.hunks)
1090 pos += len(h.hunks)
1090 skipfile = None
1091 skipfile = None
1091 fixoffset = 0
1092 fixoffset = 0
1092 hdr = ''.join(h.header)
1093 hdr = ''.join(h.header)
1093 if hdr in seen:
1094 if hdr in seen:
1094 continue
1095 continue
1095 seen.add(hdr)
1096 seen.add(hdr)
1096 if skipall is None:
1097 if skipall is None:
1097 h.pretty(ui)
1098 h.pretty(ui)
1098 msg = (_('examine changes to %s?') %
1099 msg = (_('examine changes to %s?') %
1099 _(' and ').join("'%s'" % f for f in h.files()))
1100 _(' and ').join("'%s'" % f for f in h.files()))
1100 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1101 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1101 if not r:
1102 if not r:
1102 continue
1103 continue
1103 applied[h.filename()] = [h]
1104 applied[h.filename()] = [h]
1104 if h.allhunks():
1105 if h.allhunks():
1105 applied[h.filename()] += h.hunks
1106 applied[h.filename()] += h.hunks
1106 continue
1107 continue
1107 for i, chunk in enumerate(h.hunks):
1108 for i, chunk in enumerate(h.hunks):
1108 if skipfile is None and skipall is None:
1109 if skipfile is None and skipall is None:
1109 chunk.pretty(ui)
1110 chunk.pretty(ui)
1110 if total == 1:
1111 if total == 1:
1111 msg = _("record this change to '%s'?") % chunk.filename()
1112 msg = _("record this change to '%s'?") % chunk.filename()
1112 else:
1113 else:
1113 idx = pos - len(h.hunks) + i
1114 idx = pos - len(h.hunks) + i
1114 msg = _("record change %d/%d to '%s'?") % (idx, total,
1115 msg = _("record change %d/%d to '%s'?") % (idx, total,
1115 chunk.filename())
1116 chunk.filename())
1116 r, skipfile, skipall, newpatches = prompt(skipfile,
1117 r, skipfile, skipall, newpatches = prompt(skipfile,
1117 skipall, msg, chunk)
1118 skipall, msg, chunk)
1118 if r:
1119 if r:
1119 if fixoffset:
1120 if fixoffset:
1120 chunk = copy.copy(chunk)
1121 chunk = copy.copy(chunk)
1121 chunk.toline += fixoffset
1122 chunk.toline += fixoffset
1122 applied[chunk.filename()].append(chunk)
1123 applied[chunk.filename()].append(chunk)
1123 elif newpatches is not None:
1124 elif newpatches is not None:
1124 for newpatch in newpatches:
1125 for newpatch in newpatches:
1125 for newhunk in newpatch.hunks:
1126 for newhunk in newpatch.hunks:
1126 if fixoffset:
1127 if fixoffset:
1127 newhunk.toline += fixoffset
1128 newhunk.toline += fixoffset
1128 applied[newhunk.filename()].append(newhunk)
1129 applied[newhunk.filename()].append(newhunk)
1129 else:
1130 else:
1130 fixoffset += chunk.removed - chunk.added
1131 fixoffset += chunk.removed - chunk.added
1131 return (sum([h for h in applied.itervalues()
1132 return (sum([h for h in applied.itervalues()
1132 if h[0].special() or len(h) > 1], []), {})
1133 if h[0].special() or len(h) > 1], []), {})
1133 class hunk(object):
1134 class hunk(object):
1134 def __init__(self, desc, num, lr, context):
1135 def __init__(self, desc, num, lr, context):
1135 self.number = num
1136 self.number = num
1136 self.desc = desc
1137 self.desc = desc
1137 self.hunk = [desc]
1138 self.hunk = [desc]
1138 self.a = []
1139 self.a = []
1139 self.b = []
1140 self.b = []
1140 self.starta = self.lena = None
1141 self.starta = self.lena = None
1141 self.startb = self.lenb = None
1142 self.startb = self.lenb = None
1142 if lr is not None:
1143 if lr is not None:
1143 if context:
1144 if context:
1144 self.read_context_hunk(lr)
1145 self.read_context_hunk(lr)
1145 else:
1146 else:
1146 self.read_unified_hunk(lr)
1147 self.read_unified_hunk(lr)
1147
1148
1148 def getnormalized(self):
1149 def getnormalized(self):
1149 """Return a copy with line endings normalized to LF."""
1150 """Return a copy with line endings normalized to LF."""
1150
1151
1151 def normalize(lines):
1152 def normalize(lines):
1152 nlines = []
1153 nlines = []
1153 for line in lines:
1154 for line in lines:
1154 if line.endswith('\r\n'):
1155 if line.endswith('\r\n'):
1155 line = line[:-2] + '\n'
1156 line = line[:-2] + '\n'
1156 nlines.append(line)
1157 nlines.append(line)
1157 return nlines
1158 return nlines
1158
1159
1159 # Dummy object, it is rebuilt manually
1160 # Dummy object, it is rebuilt manually
1160 nh = hunk(self.desc, self.number, None, None)
1161 nh = hunk(self.desc, self.number, None, None)
1161 nh.number = self.number
1162 nh.number = self.number
1162 nh.desc = self.desc
1163 nh.desc = self.desc
1163 nh.hunk = self.hunk
1164 nh.hunk = self.hunk
1164 nh.a = normalize(self.a)
1165 nh.a = normalize(self.a)
1165 nh.b = normalize(self.b)
1166 nh.b = normalize(self.b)
1166 nh.starta = self.starta
1167 nh.starta = self.starta
1167 nh.startb = self.startb
1168 nh.startb = self.startb
1168 nh.lena = self.lena
1169 nh.lena = self.lena
1169 nh.lenb = self.lenb
1170 nh.lenb = self.lenb
1170 return nh
1171 return nh
1171
1172
1172 def read_unified_hunk(self, lr):
1173 def read_unified_hunk(self, lr):
1173 m = unidesc.match(self.desc)
1174 m = unidesc.match(self.desc)
1174 if not m:
1175 if not m:
1175 raise PatchError(_("bad hunk #%d") % self.number)
1176 raise PatchError(_("bad hunk #%d") % self.number)
1176 self.starta, self.lena, self.startb, self.lenb = m.groups()
1177 self.starta, self.lena, self.startb, self.lenb = m.groups()
1177 if self.lena is None:
1178 if self.lena is None:
1178 self.lena = 1
1179 self.lena = 1
1179 else:
1180 else:
1180 self.lena = int(self.lena)
1181 self.lena = int(self.lena)
1181 if self.lenb is None:
1182 if self.lenb is None:
1182 self.lenb = 1
1183 self.lenb = 1
1183 else:
1184 else:
1184 self.lenb = int(self.lenb)
1185 self.lenb = int(self.lenb)
1185 self.starta = int(self.starta)
1186 self.starta = int(self.starta)
1186 self.startb = int(self.startb)
1187 self.startb = int(self.startb)
1187 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1188 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1188 self.b)
1189 self.b)
1189 # if we hit eof before finishing out the hunk, the last line will
1190 # if we hit eof before finishing out the hunk, the last line will
1190 # be zero length. Let's try to fix it up.
1191 # be zero length. Let's try to fix it up.
1191 while len(self.hunk[-1]) == 0:
1192 while len(self.hunk[-1]) == 0:
1192 del self.hunk[-1]
1193 del self.hunk[-1]
1193 del self.a[-1]
1194 del self.a[-1]
1194 del self.b[-1]
1195 del self.b[-1]
1195 self.lena -= 1
1196 self.lena -= 1
1196 self.lenb -= 1
1197 self.lenb -= 1
1197 self._fixnewline(lr)
1198 self._fixnewline(lr)
1198
1199
1199 def read_context_hunk(self, lr):
1200 def read_context_hunk(self, lr):
1200 self.desc = lr.readline()
1201 self.desc = lr.readline()
1201 m = contextdesc.match(self.desc)
1202 m = contextdesc.match(self.desc)
1202 if not m:
1203 if not m:
1203 raise PatchError(_("bad hunk #%d") % self.number)
1204 raise PatchError(_("bad hunk #%d") % self.number)
1204 self.starta, aend = m.groups()
1205 self.starta, aend = m.groups()
1205 self.starta = int(self.starta)
1206 self.starta = int(self.starta)
1206 if aend is None:
1207 if aend is None:
1207 aend = self.starta
1208 aend = self.starta
1208 self.lena = int(aend) - self.starta
1209 self.lena = int(aend) - self.starta
1209 if self.starta:
1210 if self.starta:
1210 self.lena += 1
1211 self.lena += 1
1211 for x in xrange(self.lena):
1212 for x in xrange(self.lena):
1212 l = lr.readline()
1213 l = lr.readline()
1213 if l.startswith('---'):
1214 if l.startswith('---'):
1214 # lines addition, old block is empty
1215 # lines addition, old block is empty
1215 lr.push(l)
1216 lr.push(l)
1216 break
1217 break
1217 s = l[2:]
1218 s = l[2:]
1218 if l.startswith('- ') or l.startswith('! '):
1219 if l.startswith('- ') or l.startswith('! '):
1219 u = '-' + s
1220 u = '-' + s
1220 elif l.startswith(' '):
1221 elif l.startswith(' '):
1221 u = ' ' + s
1222 u = ' ' + s
1222 else:
1223 else:
1223 raise PatchError(_("bad hunk #%d old text line %d") %
1224 raise PatchError(_("bad hunk #%d old text line %d") %
1224 (self.number, x))
1225 (self.number, x))
1225 self.a.append(u)
1226 self.a.append(u)
1226 self.hunk.append(u)
1227 self.hunk.append(u)
1227
1228
1228 l = lr.readline()
1229 l = lr.readline()
1229 if l.startswith('\ '):
1230 if l.startswith('\ '):
1230 s = self.a[-1][:-1]
1231 s = self.a[-1][:-1]
1231 self.a[-1] = s
1232 self.a[-1] = s
1232 self.hunk[-1] = s
1233 self.hunk[-1] = s
1233 l = lr.readline()
1234 l = lr.readline()
1234 m = contextdesc.match(l)
1235 m = contextdesc.match(l)
1235 if not m:
1236 if not m:
1236 raise PatchError(_("bad hunk #%d") % self.number)
1237 raise PatchError(_("bad hunk #%d") % self.number)
1237 self.startb, bend = m.groups()
1238 self.startb, bend = m.groups()
1238 self.startb = int(self.startb)
1239 self.startb = int(self.startb)
1239 if bend is None:
1240 if bend is None:
1240 bend = self.startb
1241 bend = self.startb
1241 self.lenb = int(bend) - self.startb
1242 self.lenb = int(bend) - self.startb
1242 if self.startb:
1243 if self.startb:
1243 self.lenb += 1
1244 self.lenb += 1
1244 hunki = 1
1245 hunki = 1
1245 for x in xrange(self.lenb):
1246 for x in xrange(self.lenb):
1246 l = lr.readline()
1247 l = lr.readline()
1247 if l.startswith('\ '):
1248 if l.startswith('\ '):
1248 # XXX: the only way to hit this is with an invalid line range.
1249 # XXX: the only way to hit this is with an invalid line range.
1249 # The no-eol marker is not counted in the line range, but I
1250 # The no-eol marker is not counted in the line range, but I
1250 # guess there are diff(1) out there which behave differently.
1251 # guess there are diff(1) out there which behave differently.
1251 s = self.b[-1][:-1]
1252 s = self.b[-1][:-1]
1252 self.b[-1] = s
1253 self.b[-1] = s
1253 self.hunk[hunki - 1] = s
1254 self.hunk[hunki - 1] = s
1254 continue
1255 continue
1255 if not l:
1256 if not l:
1256 # line deletions, new block is empty and we hit EOF
1257 # line deletions, new block is empty and we hit EOF
1257 lr.push(l)
1258 lr.push(l)
1258 break
1259 break
1259 s = l[2:]
1260 s = l[2:]
1260 if l.startswith('+ ') or l.startswith('! '):
1261 if l.startswith('+ ') or l.startswith('! '):
1261 u = '+' + s
1262 u = '+' + s
1262 elif l.startswith(' '):
1263 elif l.startswith(' '):
1263 u = ' ' + s
1264 u = ' ' + s
1264 elif len(self.b) == 0:
1265 elif len(self.b) == 0:
1265 # line deletions, new block is empty
1266 # line deletions, new block is empty
1266 lr.push(l)
1267 lr.push(l)
1267 break
1268 break
1268 else:
1269 else:
1269 raise PatchError(_("bad hunk #%d new text line %d") %
1270 raise PatchError(_("bad hunk #%d new text line %d") %
1270 (self.number, x))
1271 (self.number, x))
1271 self.b.append(s)
1272 self.b.append(s)
1272 while True:
1273 while True:
1273 if hunki >= len(self.hunk):
1274 if hunki >= len(self.hunk):
1274 h = ""
1275 h = ""
1275 else:
1276 else:
1276 h = self.hunk[hunki]
1277 h = self.hunk[hunki]
1277 hunki += 1
1278 hunki += 1
1278 if h == u:
1279 if h == u:
1279 break
1280 break
1280 elif h.startswith('-'):
1281 elif h.startswith('-'):
1281 continue
1282 continue
1282 else:
1283 else:
1283 self.hunk.insert(hunki - 1, u)
1284 self.hunk.insert(hunki - 1, u)
1284 break
1285 break
1285
1286
1286 if not self.a:
1287 if not self.a:
1287 # this happens when lines were only added to the hunk
1288 # this happens when lines were only added to the hunk
1288 for x in self.hunk:
1289 for x in self.hunk:
1289 if x.startswith('-') or x.startswith(' '):
1290 if x.startswith('-') or x.startswith(' '):
1290 self.a.append(x)
1291 self.a.append(x)
1291 if not self.b:
1292 if not self.b:
1292 # this happens when lines were only deleted from the hunk
1293 # this happens when lines were only deleted from the hunk
1293 for x in self.hunk:
1294 for x in self.hunk:
1294 if x.startswith('+') or x.startswith(' '):
1295 if x.startswith('+') or x.startswith(' '):
1295 self.b.append(x[1:])
1296 self.b.append(x[1:])
1296 # @@ -start,len +start,len @@
1297 # @@ -start,len +start,len @@
1297 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1298 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1298 self.startb, self.lenb)
1299 self.startb, self.lenb)
1299 self.hunk[0] = self.desc
1300 self.hunk[0] = self.desc
1300 self._fixnewline(lr)
1301 self._fixnewline(lr)
1301
1302
1302 def _fixnewline(self, lr):
1303 def _fixnewline(self, lr):
1303 l = lr.readline()
1304 l = lr.readline()
1304 if l.startswith('\ '):
1305 if l.startswith('\ '):
1305 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1306 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1306 else:
1307 else:
1307 lr.push(l)
1308 lr.push(l)
1308
1309
1309 def complete(self):
1310 def complete(self):
1310 return len(self.a) == self.lena and len(self.b) == self.lenb
1311 return len(self.a) == self.lena and len(self.b) == self.lenb
1311
1312
1312 def _fuzzit(self, old, new, fuzz, toponly):
1313 def _fuzzit(self, old, new, fuzz, toponly):
1313 # this removes context lines from the top and bottom of the old and new
1314 # this removes context lines from the top and bottom of the old and new
1314 # line lists. It checks the hunk to make sure only context lines are
1315 # line lists. It checks the hunk to make sure only context lines are
1315 # removed, and then returns the shortened lists of lines.
1316 # removed, and then returns the shortened lists of lines.
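# e.g. with fuzz=1 at most one leading (and, unless toponly, one trailing)
# context line is dropped from both the old and the new side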
1316 fuzz = min(fuzz, len(old))
1317 fuzz = min(fuzz, len(old))
1317 if fuzz:
1318 if fuzz:
1318 top = 0
1319 top = 0
1319 bot = 0
1320 bot = 0
1320 hlen = len(self.hunk)
1321 hlen = len(self.hunk)
1321 for x in xrange(hlen - 1):
1322 for x in xrange(hlen - 1):
1322 # the hunk starts with the @@ line, so use x+1
1323 # the hunk starts with the @@ line, so use x+1
1323 if self.hunk[x + 1][0] == ' ':
1324 if self.hunk[x + 1][0] == ' ':
1324 top += 1
1325 top += 1
1325 else:
1326 else:
1326 break
1327 break
1327 if not toponly:
1328 if not toponly:
1328 for x in xrange(hlen - 1):
1329 for x in xrange(hlen - 1):
1329 if self.hunk[hlen - bot - 1][0] == ' ':
1330 if self.hunk[hlen - bot - 1][0] == ' ':
1330 bot += 1
1331 bot += 1
1331 else:
1332 else:
1332 break
1333 break
1333
1334
1334 bot = min(fuzz, bot)
1335 bot = min(fuzz, bot)
1335 top = min(fuzz, top)
1336 top = min(fuzz, top)
1336 return old[top:len(old) - bot], new[top:len(new) - bot], top
1337 return old[top:len(old) - bot], new[top:len(new) - bot], top
1337 return old, new, 0
1338 return old, new, 0
1338
1339
1339 def fuzzit(self, fuzz, toponly):
1340 def fuzzit(self, fuzz, toponly):
1340 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1341 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1341 oldstart = self.starta + top
1342 oldstart = self.starta + top
1342 newstart = self.startb + top
1343 newstart = self.startb + top
1343 # zero length hunk ranges already have their start decremented
1344 # zero length hunk ranges already have their start decremented
1344 if self.lena and oldstart > 0:
1345 if self.lena and oldstart > 0:
1345 oldstart -= 1
1346 oldstart -= 1
1346 if self.lenb and newstart > 0:
1347 if self.lenb and newstart > 0:
1347 newstart -= 1
1348 newstart -= 1
1348 return old, oldstart, new, newstart
1349 return old, oldstart, new, newstart
1349
1350
1350 class binhunk(object):
1351 class binhunk(object):
1351 'A binary patch file.'
1352 'A binary patch file.'
1352 def __init__(self, lr, fname):
1353 def __init__(self, lr, fname):
1353 self.text = None
1354 self.text = None
1354 self.delta = False
1355 self.delta = False
1355 self.hunk = ['GIT binary patch\n']
1356 self.hunk = ['GIT binary patch\n']
1356 self._fname = fname
1357 self._fname = fname
1357 self._read(lr)
1358 self._read(lr)
1358
1359
1359 def complete(self):
1360 def complete(self):
1360 return self.text is not None
1361 return self.text is not None
1361
1362
1362 def new(self, lines):
1363 def new(self, lines):
1363 if self.delta:
1364 if self.delta:
1364 return [applybindelta(self.text, ''.join(lines))]
1365 return [applybindelta(self.text, ''.join(lines))]
1365 return [self.text]
1366 return [self.text]
1366
1367
1367 def _read(self, lr):
1368 def _read(self, lr):
1368 def getline(lr, hunk):
1369 def getline(lr, hunk):
1369 l = lr.readline()
1370 l = lr.readline()
1370 hunk.append(l)
1371 hunk.append(l)
1371 return l.rstrip('\r\n')
1372 return l.rstrip('\r\n')
1372
1373
1373 size = 0
1374 size = 0
1374 while True:
1375 while True:
1375 line = getline(lr, self.hunk)
1376 line = getline(lr, self.hunk)
1376 if not line:
1377 if not line:
1377 raise PatchError(_('could not extract "%s" binary data')
1378 raise PatchError(_('could not extract "%s" binary data')
1378 % self._fname)
1379 % self._fname)
1379 if line.startswith('literal '):
1380 if line.startswith('literal '):
1380 size = int(line[8:].rstrip())
1381 size = int(line[8:].rstrip())
1381 break
1382 break
1382 if line.startswith('delta '):
1383 if line.startswith('delta '):
1383 size = int(line[6:].rstrip())
1384 size = int(line[6:].rstrip())
1384 self.delta = True
1385 self.delta = True
1385 break
1386 break
1386 dec = []
1387 dec = []
1387 line = getline(lr, self.hunk)
1388 line = getline(lr, self.hunk)
1388 while len(line) > 1:
1389 while len(line) > 1:
1389 l = line[0]
1390 l = line[0]
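# git binary patches prefix every base85 line with a length character:
# 'A'-'Z' stand for 1-26 decoded bytes, 'a'-'z' for 27-52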
1390 if l <= 'Z' and l >= 'A':
1391 if l <= 'Z' and l >= 'A':
1391 l = ord(l) - ord('A') + 1
1392 l = ord(l) - ord('A') + 1
1392 else:
1393 else:
1393 l = ord(l) - ord('a') + 27
1394 l = ord(l) - ord('a') + 27
1394 try:
1395 try:
1395 dec.append(base85.b85decode(line[1:])[:l])
1396 dec.append(base85.b85decode(line[1:])[:l])
1396 except ValueError as e:
1397 except ValueError as e:
1397 raise PatchError(_('could not decode "%s" binary patch: %s')
1398 raise PatchError(_('could not decode "%s" binary patch: %s')
1398 % (self._fname, str(e)))
1399 % (self._fname, str(e)))
1399 line = getline(lr, self.hunk)
1400 line = getline(lr, self.hunk)
1400 text = zlib.decompress(''.join(dec))
1401 text = zlib.decompress(''.join(dec))
1401 if len(text) != size:
1402 if len(text) != size:
1402 raise PatchError(_('"%s" length is %d bytes, should be %d')
1403 raise PatchError(_('"%s" length is %d bytes, should be %d')
1403 % (self._fname, len(text), size))
1404 % (self._fname, len(text), size))
1404 self.text = text
1405 self.text = text
1405
1406
1406 def parsefilename(str):
1407 def parsefilename(str):
1407 # --- filename \t|space stuff
1408 # --- filename \t|space stuff
1408 s = str[4:].rstrip('\r\n')
1409 s = str[4:].rstrip('\r\n')
1409 i = s.find('\t')
1410 i = s.find('\t')
1410 if i < 0:
1411 if i < 0:
1411 i = s.find(' ')
1412 i = s.find(' ')
1412 if i < 0:
1413 if i < 0:
1413 return s
1414 return s
1414 return s[:i]
1415 return s[:i]
1415
1416
1416 def reversehunks(hunks):
1417 def reversehunks(hunks):
1417 '''reverse the signs in the hunks given as argument
1418 '''reverse the signs in the hunks given as argument
1418
1419
1419 This function operates on hunks coming out of patch.filterpatch, that is
1420 This function operates on hunks coming out of patch.filterpatch, that is
1420 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1421 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1421
1422
1422 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1423 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1423 ... --- a/folder1/g
1424 ... --- a/folder1/g
1424 ... +++ b/folder1/g
1425 ... +++ b/folder1/g
1425 ... @@ -1,7 +1,7 @@
1426 ... @@ -1,7 +1,7 @@
1426 ... +firstline
1427 ... +firstline
1427 ... c
1428 ... c
1428 ... 1
1429 ... 1
1429 ... 2
1430 ... 2
1430 ... + 3
1431 ... + 3
1431 ... -4
1432 ... -4
1432 ... 5
1433 ... 5
1433 ... d
1434 ... d
1434 ... +lastline"""
1435 ... +lastline"""
1435 >>> hunks = parsepatch(rawpatch)
1436 >>> hunks = parsepatch(rawpatch)
1436 >>> hunkscomingfromfilterpatch = []
1437 >>> hunkscomingfromfilterpatch = []
1437 >>> for h in hunks:
1438 >>> for h in hunks:
1438 ... hunkscomingfromfilterpatch.append(h)
1439 ... hunkscomingfromfilterpatch.append(h)
1439 ... hunkscomingfromfilterpatch.extend(h.hunks)
1440 ... hunkscomingfromfilterpatch.extend(h.hunks)
1440
1441
1441 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1442 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1442 >>> fp = cStringIO.StringIO()
1443 >>> fp = cStringIO.StringIO()
1443 >>> for c in reversedhunks:
1444 >>> for c in reversedhunks:
1444 ... c.write(fp)
1445 ... c.write(fp)
1445 >>> fp.seek(0)
1446 >>> fp.seek(0)
1446 >>> reversedpatch = fp.read()
1447 >>> reversedpatch = fp.read()
1447 >>> print reversedpatch
1448 >>> print reversedpatch
1448 diff --git a/folder1/g b/folder1/g
1449 diff --git a/folder1/g b/folder1/g
1449 --- a/folder1/g
1450 --- a/folder1/g
1450 +++ b/folder1/g
1451 +++ b/folder1/g
1451 @@ -1,4 +1,3 @@
1452 @@ -1,4 +1,3 @@
1452 -firstline
1453 -firstline
1453 c
1454 c
1454 1
1455 1
1455 2
1456 2
1456 @@ -1,6 +2,6 @@
1457 @@ -1,6 +2,6 @@
1457 c
1458 c
1458 1
1459 1
1459 2
1460 2
1460 - 3
1461 - 3
1461 +4
1462 +4
1462 5
1463 5
1463 d
1464 d
1464 @@ -5,3 +6,2 @@
1465 @@ -5,3 +6,2 @@
1465 5
1466 5
1466 d
1467 d
1467 -lastline
1468 -lastline
1468
1469
1469 '''
1470 '''
1470
1471
1471 from . import crecord as crecordmod
1472 from . import crecord as crecordmod
1472 newhunks = []
1473 newhunks = []
1473 for c in hunks:
1474 for c in hunks:
1474 if isinstance(c, crecordmod.uihunk):
1475 if isinstance(c, crecordmod.uihunk):
1475 # curses hunks encapsulate the record hunk in _hunk
1476 # curses hunks encapsulate the record hunk in _hunk
1476 c = c._hunk
1477 c = c._hunk
1477 if isinstance(c, recordhunk):
1478 if isinstance(c, recordhunk):
1478 for j, line in enumerate(c.hunk):
1479 for j, line in enumerate(c.hunk):
1479 if line.startswith("-"):
1480 if line.startswith("-"):
1480 c.hunk[j] = "+" + c.hunk[j][1:]
1481 c.hunk[j] = "+" + c.hunk[j][1:]
1481 elif line.startswith("+"):
1482 elif line.startswith("+"):
1482 c.hunk[j] = "-" + c.hunk[j][1:]
1483 c.hunk[j] = "-" + c.hunk[j][1:]
1483 c.added, c.removed = c.removed, c.added
1484 c.added, c.removed = c.removed, c.added
1484 newhunks.append(c)
1485 newhunks.append(c)
1485 return newhunks
1486 return newhunks
1486
1487
1487 def parsepatch(originalchunks):
1488 def parsepatch(originalchunks):
1488 """patch -> [] of headers -> [] of hunks """
1489 """patch -> [] of headers -> [] of hunks """
1489 class parser(object):
1490 class parser(object):
1490 """patch parsing state machine"""
1491 """patch parsing state machine"""
1491 def __init__(self):
1492 def __init__(self):
1492 self.fromline = 0
1493 self.fromline = 0
1493 self.toline = 0
1494 self.toline = 0
1494 self.proc = ''
1495 self.proc = ''
1495 self.header = None
1496 self.header = None
1496 self.context = []
1497 self.context = []
1497 self.before = []
1498 self.before = []
1498 self.hunk = []
1499 self.hunk = []
1499 self.headers = []
1500 self.headers = []
1500
1501
1501 def addrange(self, limits):
1502 def addrange(self, limits):
1502 fromstart, fromend, tostart, toend, proc = limits
1503 fromstart, fromend, tostart, toend, proc = limits
1503 self.fromline = int(fromstart)
1504 self.fromline = int(fromstart)
1504 self.toline = int(tostart)
1505 self.toline = int(tostart)
1505 self.proc = proc
1506 self.proc = proc
1506
1507
1507 def addcontext(self, context):
1508 def addcontext(self, context):
1508 if self.hunk:
1509 if self.hunk:
1509 h = recordhunk(self.header, self.fromline, self.toline,
1510 h = recordhunk(self.header, self.fromline, self.toline,
1510 self.proc, self.before, self.hunk, context)
1511 self.proc, self.before, self.hunk, context)
1511 self.header.hunks.append(h)
1512 self.header.hunks.append(h)
1512 self.fromline += len(self.before) + h.removed
1513 self.fromline += len(self.before) + h.removed
1513 self.toline += len(self.before) + h.added
1514 self.toline += len(self.before) + h.added
1514 self.before = []
1515 self.before = []
1515 self.hunk = []
1516 self.hunk = []
1516 self.context = context
1517 self.context = context
1517
1518
1518 def addhunk(self, hunk):
1519 def addhunk(self, hunk):
1519 if self.context:
1520 if self.context:
1520 self.before = self.context
1521 self.before = self.context
1521 self.context = []
1522 self.context = []
1522 self.hunk = hunk
1523 self.hunk = hunk
1523
1524
1524 def newfile(self, hdr):
1525 def newfile(self, hdr):
1525 self.addcontext([])
1526 self.addcontext([])
1526 h = header(hdr)
1527 h = header(hdr)
1527 self.headers.append(h)
1528 self.headers.append(h)
1528 self.header = h
1529 self.header = h
1529
1530
1530 def addother(self, line):
1531 def addother(self, line):
1531 pass # 'other' lines are ignored
1532 pass # 'other' lines are ignored
1532
1533
1533 def finished(self):
1534 def finished(self):
1534 self.addcontext([])
1535 self.addcontext([])
1535 return self.headers
1536 return self.headers
1536
1537
1537 transitions = {
1538 transitions = {
1538 'file': {'context': addcontext,
1539 'file': {'context': addcontext,
1539 'file': newfile,
1540 'file': newfile,
1540 'hunk': addhunk,
1541 'hunk': addhunk,
1541 'range': addrange},
1542 'range': addrange},
1542 'context': {'file': newfile,
1543 'context': {'file': newfile,
1543 'hunk': addhunk,
1544 'hunk': addhunk,
1544 'range': addrange,
1545 'range': addrange,
1545 'other': addother},
1546 'other': addother},
1546 'hunk': {'context': addcontext,
1547 'hunk': {'context': addcontext,
1547 'file': newfile,
1548 'file': newfile,
1548 'range': addrange},
1549 'range': addrange},
1549 'range': {'context': addcontext,
1550 'range': {'context': addcontext,
1550 'hunk': addhunk},
1551 'hunk': addhunk},
1551 'other': {'other': addother},
1552 'other': {'other': addother},
1552 }
1553 }
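# a scanned line type not listed for the current state (for instance a
# 'range' line directly following another 'range' line) falls through to
# the KeyError handler below and is reported as a PatchError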
1553
1554
1554 p = parser()
1555 p = parser()
1555 fp = cStringIO.StringIO()
1556 fp = cStringIO.StringIO()
1556 fp.write(''.join(originalchunks))
1557 fp.write(''.join(originalchunks))
1557 fp.seek(0)
1558 fp.seek(0)
1558
1559
1559 state = 'context'
1560 state = 'context'
1560 for newstate, data in scanpatch(fp):
1561 for newstate, data in scanpatch(fp):
1561 try:
1562 try:
1562 p.transitions[state][newstate](p, data)
1563 p.transitions[state][newstate](p, data)
1563 except KeyError:
1564 except KeyError:
1564 raise PatchError('unhandled transition: %s -> %s' %
1565 raise PatchError('unhandled transition: %s -> %s' %
1565 (state, newstate))
1566 (state, newstate))
1566 state = newstate
1567 state = newstate
1567 del fp
1568 del fp
1568 return p.finished()
1569 return p.finished()
1569
1570
1570 def pathtransform(path, strip, prefix):
1571 def pathtransform(path, strip, prefix):
1571 '''turn a path from a patch into a path suitable for the repository
1572 '''turn a path from a patch into a path suitable for the repository
1572
1573
1573 prefix, if not empty, is expected to be normalized with a / at the end.
1574 prefix, if not empty, is expected to be normalized with a / at the end.
1574
1575
1575 Returns (stripped components, path in repository).
1576 Returns (stripped components, path in repository).
1576
1577
1577 >>> pathtransform('a/b/c', 0, '')
1578 >>> pathtransform('a/b/c', 0, '')
1578 ('', 'a/b/c')
1579 ('', 'a/b/c')
1579 >>> pathtransform(' a/b/c ', 0, '')
1580 >>> pathtransform(' a/b/c ', 0, '')
1580 ('', ' a/b/c')
1581 ('', ' a/b/c')
1581 >>> pathtransform(' a/b/c ', 2, '')
1582 >>> pathtransform(' a/b/c ', 2, '')
1582 ('a/b/', 'c')
1583 ('a/b/', 'c')
1583 >>> pathtransform('a/b/c', 0, 'd/e/')
1584 >>> pathtransform('a/b/c', 0, 'd/e/')
1584 ('', 'd/e/a/b/c')
1585 ('', 'd/e/a/b/c')
1585 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1586 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1586 ('a//b/', 'd/e/c')
1587 ('a//b/', 'd/e/c')
1587 >>> pathtransform('a/b/c', 3, '')
1588 >>> pathtransform('a/b/c', 3, '')
1588 Traceback (most recent call last):
1589 Traceback (most recent call last):
1589 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1590 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1590 '''
1591 '''
1591 pathlen = len(path)
1592 pathlen = len(path)
1592 i = 0
1593 i = 0
1593 if strip == 0:
1594 if strip == 0:
1594 return '', prefix + path.rstrip()
1595 return '', prefix + path.rstrip()
1595 count = strip
1596 count = strip
1596 while count > 0:
1597 while count > 0:
1597 i = path.find('/', i)
1598 i = path.find('/', i)
1598 if i == -1:
1599 if i == -1:
1599 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1600 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1600 (count, strip, path))
1601 (count, strip, path))
1601 i += 1
1602 i += 1
1602 # consume '//' in the path
1603 # consume '//' in the path
1603 while i < pathlen - 1 and path[i] == '/':
1604 while i < pathlen - 1 and path[i] == '/':
1604 i += 1
1605 i += 1
1605 count -= 1
1606 count -= 1
1606 return path[:i].lstrip(), prefix + path[i:].rstrip()
1607 return path[:i].lstrip(), prefix + path[i:].rstrip()
1607
1608
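The doctests above cover the corner cases; as one more illustration (an editorial example, not part of patch.py), stripping a single leading component mirrors what `patch -p1` does to a git-style path:

    >>> pathtransform('a/mercurial/patch.py', 1, '')
    ('a/', 'mercurial/patch.py')
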
1608 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1609 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1609 nulla = afile_orig == "/dev/null"
1610 nulla = afile_orig == "/dev/null"
1610 nullb = bfile_orig == "/dev/null"
1611 nullb = bfile_orig == "/dev/null"
1611 create = nulla and hunk.starta == 0 and hunk.lena == 0
1612 create = nulla and hunk.starta == 0 and hunk.lena == 0
1612 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1613 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1613 abase, afile = pathtransform(afile_orig, strip, prefix)
1614 abase, afile = pathtransform(afile_orig, strip, prefix)
1614 gooda = not nulla and backend.exists(afile)
1615 gooda = not nulla and backend.exists(afile)
1615 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1616 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1616 if afile == bfile:
1617 if afile == bfile:
1617 goodb = gooda
1618 goodb = gooda
1618 else:
1619 else:
1619 goodb = not nullb and backend.exists(bfile)
1620 goodb = not nullb and backend.exists(bfile)
1620 missing = not goodb and not gooda and not create
1621 missing = not goodb and not gooda and not create
1621
1622
1622 # some diff programs apparently produce patches where the afile is
1623 # some diff programs apparently produce patches where the afile is
1623 # not /dev/null, but afile starts with bfile
1624 # not /dev/null, but afile starts with bfile
1624 abasedir = afile[:afile.rfind('/') + 1]
1625 abasedir = afile[:afile.rfind('/') + 1]
1625 bbasedir = bfile[:bfile.rfind('/') + 1]
1626 bbasedir = bfile[:bfile.rfind('/') + 1]
1626 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1627 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1627 and hunk.starta == 0 and hunk.lena == 0):
1628 and hunk.starta == 0 and hunk.lena == 0):
1628 create = True
1629 create = True
1629 missing = False
1630 missing = False
1630
1631
1631 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1632 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1632 # diff is between a file and its backup. In this case, the original
1633 # diff is between a file and its backup. In this case, the original
1633 # file should be patched (see original mpatch code).
1634 # file should be patched (see original mpatch code).
1634 isbackup = (abase == bbase and bfile.startswith(afile))
1635 isbackup = (abase == bbase and bfile.startswith(afile))
1635 fname = None
1636 fname = None
1636 if not missing:
1637 if not missing:
1637 if gooda and goodb:
1638 if gooda and goodb:
1638 if isbackup:
1639 if isbackup:
1639 fname = afile
1640 fname = afile
1640 else:
1641 else:
1641 fname = bfile
1642 fname = bfile
1642 elif gooda:
1643 elif gooda:
1643 fname = afile
1644 fname = afile
1644
1645
1645 if not fname:
1646 if not fname:
1646 if not nullb:
1647 if not nullb:
1647 if isbackup:
1648 if isbackup:
1648 fname = afile
1649 fname = afile
1649 else:
1650 else:
1650 fname = bfile
1651 fname = bfile
1651 elif not nulla:
1652 elif not nulla:
1652 fname = afile
1653 fname = afile
1653 else:
1654 else:
1654 raise PatchError(_("undefined source and destination files"))
1655 raise PatchError(_("undefined source and destination files"))
1655
1656
1656 gp = patchmeta(fname)
1657 gp = patchmeta(fname)
1657 if create:
1658 if create:
1658 gp.op = 'ADD'
1659 gp.op = 'ADD'
1659 elif remove:
1660 elif remove:
1660 gp.op = 'DELETE'
1661 gp.op = 'DELETE'
1661 return gp
1662 return gp
1662
1663
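For reference, the /dev/null convention that makepatchmeta() keys on looks like this in a plain unified diff (an illustrative snippet, not taken from any test): a created file names /dev/null as the old side and carries a hunk whose old range is 0,0, which is exactly the 'create' condition computed above; a deletion is the mirror image on the new side.

    --- /dev/null
    +++ b/newfile.txt
    @@ -0,0 +1,2 @@
    +first line
    +second line
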
1663 def scanpatch(fp):
1664 def scanpatch(fp):
1664 """like patch.iterhunks, but yield different events
1665 """like patch.iterhunks, but yield different events
1665
1666
1666 - ('file', [header_lines + fromfile + tofile])
1667 - ('file', [header_lines + fromfile + tofile])
1667 - ('context', [context_lines])
1668 - ('context', [context_lines])
1668 - ('hunk', [hunk_lines])
1669 - ('hunk', [hunk_lines])
1669 - ('range', (-start,len, +start,len, proc))
1670 - ('range', (-start,len, +start,len, proc))
1670 """
1671 """
1671 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1672 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1672 lr = linereader(fp)
1673 lr = linereader(fp)
1673
1674
1674 def scanwhile(first, p):
1675 def scanwhile(first, p):
1675 """scan lr while predicate holds"""
1676 """scan lr while predicate holds"""
1676 lines = [first]
1677 lines = [first]
1677 while True:
1678 while True:
1678 line = lr.readline()
1679 line = lr.readline()
1679 if not line:
1680 if not line:
1680 break
1681 break
1681 if p(line):
1682 if p(line):
1682 lines.append(line)
1683 lines.append(line)
1683 else:
1684 else:
1684 lr.push(line)
1685 lr.push(line)
1685 break
1686 break
1686 return lines
1687 return lines
1687
1688
1688 while True:
1689 while True:
1689 line = lr.readline()
1690 line = lr.readline()
1690 if not line:
1691 if not line:
1691 break
1692 break
1692 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1693 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1693 def notheader(line):
1694 def notheader(line):
1694 s = line.split(None, 1)
1695 s = line.split(None, 1)
1695 return not s or s[0] not in ('---', 'diff')
1696 return not s or s[0] not in ('---', 'diff')
1696 header = scanwhile(line, notheader)
1697 header = scanwhile(line, notheader)
1697 fromfile = lr.readline()
1698 fromfile = lr.readline()
1698 if fromfile.startswith('---'):
1699 if fromfile.startswith('---'):
1699 tofile = lr.readline()
1700 tofile = lr.readline()
1700 header += [fromfile, tofile]
1701 header += [fromfile, tofile]
1701 else:
1702 else:
1702 lr.push(fromfile)
1703 lr.push(fromfile)
1703 yield 'file', header
1704 yield 'file', header
1704 elif line[0] == ' ':
1705 elif line[0] == ' ':
1705 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1706 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1706 elif line[0] in '-+':
1707 elif line[0] in '-+':
1707 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1708 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1708 else:
1709 else:
1709 m = lines_re.match(line)
1710 m = lines_re.match(line)
1710 if m:
1711 if m:
1711 yield 'range', m.groups()
1712 yield 'range', m.groups()
1712 else:
1713 else:
1713 yield 'other', line
1714 yield 'other', line
1714
1715
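As a rough sketch of the event stream (a hand-written illustration, not captured output), a minimal git-style patch is turned into 'file', 'range', 'context' and 'hunk' events in that order:

    # hypothetical input fed to scanpatch() through a file-like object:
    #   diff --git a/f b/f
    #   --- a/f
    #   +++ b/f
    #   @@ -1,2 +1,2 @@
    #    unchanged
    #   -old
    #   +new
    #
    # events yielded, in order:
    #   ('file',    ['diff --git a/f b/f\n', '--- a/f\n', '+++ b/f\n'])
    #   ('range',   ('1', '2', '1', '2', ''))
    #   ('context', [' unchanged\n'])
    #   ('hunk',    ['-old\n', '+new\n'])
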
1715 def scangitpatch(lr, firstline):
1716 def scangitpatch(lr, firstline):
1716 """
1717 """
1717 Git patches can emit:
1718 Git patches can emit:
1718 - rename a to b
1719 - rename a to b
1719 - change b
1720 - change b
1720 - copy a to c
1721 - copy a to c
1721 - change c
1722 - change c
1722
1723
1723 We cannot apply this sequence as-is: the renamed 'a' could not be
1724 We cannot apply this sequence as-is: the renamed 'a' could not be
1724 found, because it would have been renamed already. And we cannot copy
1725 found, because it would have been renamed already. And we cannot copy
1725 from 'b' instead because 'b' would have been changed already. So
1726 from 'b' instead because 'b' would have been changed already. So
1726 we scan the git patch for copy and rename commands so we can
1727 we scan the git patch for copy and rename commands so we can
1727 perform the copies ahead of time.
1728 perform the copies ahead of time.
1728 """
1729 """
1729 pos = 0
1730 pos = 0
1730 try:
1731 try:
1731 pos = lr.fp.tell()
1732 pos = lr.fp.tell()
1732 fp = lr.fp
1733 fp = lr.fp
1733 except IOError:
1734 except IOError:
1734 fp = cStringIO.StringIO(lr.fp.read())
1735 fp = cStringIO.StringIO(lr.fp.read())
1735 gitlr = linereader(fp)
1736 gitlr = linereader(fp)
1736 gitlr.push(firstline)
1737 gitlr.push(firstline)
1737 gitpatches = readgitpatch(gitlr)
1738 gitpatches = readgitpatch(gitlr)
1738 fp.seek(pos)
1739 fp.seek(pos)
1739 return gitpatches
1740 return gitpatches
1740
1741
1741 def iterhunks(fp):
1742 def iterhunks(fp):
1742 """Read a patch and yield the following events:
1743 """Read a patch and yield the following events:
1743 - ("file", afile, bfile, firsthunk): select a new target file.
1744 - ("file", afile, bfile, firsthunk): select a new target file.
1744 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1745 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1745 "file" event.
1746 "file" event.
1746 - ("git", gitchanges): current diff is in git format, gitchanges
1747 - ("git", gitchanges): current diff is in git format, gitchanges
1747 maps filenames to gitpatch records. Unique event.
1748 maps filenames to gitpatch records. Unique event.
1748 """
1749 """
1749 afile = ""
1750 afile = ""
1750 bfile = ""
1751 bfile = ""
1751 state = None
1752 state = None
1752 hunknum = 0
1753 hunknum = 0
1753 emitfile = newfile = False
1754 emitfile = newfile = False
1754 gitpatches = None
1755 gitpatches = None
1755
1756
1756 # our states
1757 # our states
1757 BFILE = 1
1758 BFILE = 1
1758 context = None
1759 context = None
1759 lr = linereader(fp)
1760 lr = linereader(fp)
1760
1761
1761 while True:
1762 while True:
1762 x = lr.readline()
1763 x = lr.readline()
1763 if not x:
1764 if not x:
1764 break
1765 break
1765 if state == BFILE and (
1766 if state == BFILE and (
1766 (not context and x[0] == '@')
1767 (not context and x[0] == '@')
1767 or (context is not False and x.startswith('***************'))
1768 or (context is not False and x.startswith('***************'))
1768 or x.startswith('GIT binary patch')):
1769 or x.startswith('GIT binary patch')):
1769 gp = None
1770 gp = None
1770 if (gitpatches and
1771 if (gitpatches and
1771 gitpatches[-1].ispatching(afile, bfile)):
1772 gitpatches[-1].ispatching(afile, bfile)):
1772 gp = gitpatches.pop()
1773 gp = gitpatches.pop()
1773 if x.startswith('GIT binary patch'):
1774 if x.startswith('GIT binary patch'):
1774 h = binhunk(lr, gp.path)
1775 h = binhunk(lr, gp.path)
1775 else:
1776 else:
1776 if context is None and x.startswith('***************'):
1777 if context is None and x.startswith('***************'):
1777 context = True
1778 context = True
1778 h = hunk(x, hunknum + 1, lr, context)
1779 h = hunk(x, hunknum + 1, lr, context)
1779 hunknum += 1
1780 hunknum += 1
1780 if emitfile:
1781 if emitfile:
1781 emitfile = False
1782 emitfile = False
1782 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1783 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1783 yield 'hunk', h
1784 yield 'hunk', h
1784 elif x.startswith('diff --git a/'):
1785 elif x.startswith('diff --git a/'):
1785 m = gitre.match(x.rstrip(' \r\n'))
1786 m = gitre.match(x.rstrip(' \r\n'))
1786 if not m:
1787 if not m:
1787 continue
1788 continue
1788 if gitpatches is None:
1789 if gitpatches is None:
1789 # scan whole input for git metadata
1790 # scan whole input for git metadata
1790 gitpatches = scangitpatch(lr, x)
1791 gitpatches = scangitpatch(lr, x)
1791 yield 'git', [g.copy() for g in gitpatches
1792 yield 'git', [g.copy() for g in gitpatches
1792 if g.op in ('COPY', 'RENAME')]
1793 if g.op in ('COPY', 'RENAME')]
1793 gitpatches.reverse()
1794 gitpatches.reverse()
1794 afile = 'a/' + m.group(1)
1795 afile = 'a/' + m.group(1)
1795 bfile = 'b/' + m.group(2)
1796 bfile = 'b/' + m.group(2)
1796 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1797 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1797 gp = gitpatches.pop()
1798 gp = gitpatches.pop()
1798 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1799 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1799 if not gitpatches:
1800 if not gitpatches:
1800 raise PatchError(_('failed to synchronize metadata for "%s"')
1801 raise PatchError(_('failed to synchronize metadata for "%s"')
1801 % afile[2:])
1802 % afile[2:])
1802 gp = gitpatches[-1]
1803 gp = gitpatches[-1]
1803 newfile = True
1804 newfile = True
1804 elif x.startswith('---'):
1805 elif x.startswith('---'):
1805 # check for a unified diff
1806 # check for a unified diff
1806 l2 = lr.readline()
1807 l2 = lr.readline()
1807 if not l2.startswith('+++'):
1808 if not l2.startswith('+++'):
1808 lr.push(l2)
1809 lr.push(l2)
1809 continue
1810 continue
1810 newfile = True
1811 newfile = True
1811 context = False
1812 context = False
1812 afile = parsefilename(x)
1813 afile = parsefilename(x)
1813 bfile = parsefilename(l2)
1814 bfile = parsefilename(l2)
1814 elif x.startswith('***'):
1815 elif x.startswith('***'):
1815 # check for a context diff
1816 # check for a context diff
1816 l2 = lr.readline()
1817 l2 = lr.readline()
1817 if not l2.startswith('---'):
1818 if not l2.startswith('---'):
1818 lr.push(l2)
1819 lr.push(l2)
1819 continue
1820 continue
1820 l3 = lr.readline()
1821 l3 = lr.readline()
1821 lr.push(l3)
1822 lr.push(l3)
1822 if not l3.startswith("***************"):
1823 if not l3.startswith("***************"):
1823 lr.push(l2)
1824 lr.push(l2)
1824 continue
1825 continue
1825 newfile = True
1826 newfile = True
1826 context = True
1827 context = True
1827 afile = parsefilename(x)
1828 afile = parsefilename(x)
1828 bfile = parsefilename(l2)
1829 bfile = parsefilename(l2)
1829
1830
1830 if newfile:
1831 if newfile:
1831 newfile = False
1832 newfile = False
1832 emitfile = True
1833 emitfile = True
1833 state = BFILE
1834 state = BFILE
1834 hunknum = 0
1835 hunknum = 0
1835
1836
1836 while gitpatches:
1837 while gitpatches:
1837 gp = gitpatches.pop()
1838 gp = gitpatches.pop()
1838 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1839 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1839
1840
1840 def applybindelta(binchunk, data):
1841 def applybindelta(binchunk, data):
1841 """Apply a binary delta hunk
1842 """Apply a binary delta hunk
1842 The algorithm used is the algorithm from git's patch-delta.c
1843 The algorithm used is the algorithm from git's patch-delta.c
1843 """
1844 """
1844 def deltahead(binchunk):
1845 def deltahead(binchunk):
1845 i = 0
1846 i = 0
1846 for c in binchunk:
1847 for c in binchunk:
1847 i += 1
1848 i += 1
1848 if not (ord(c) & 0x80):
1849 if not (ord(c) & 0x80):
1849 return i
1850 return i
1850 return i
1851 return i
1851 out = ""
1852 out = ""
1852 s = deltahead(binchunk)
1853 s = deltahead(binchunk)
1853 binchunk = binchunk[s:]
1854 binchunk = binchunk[s:]
1854 s = deltahead(binchunk)
1855 s = deltahead(binchunk)
1855 binchunk = binchunk[s:]
1856 binchunk = binchunk[s:]
1856 i = 0
1857 i = 0
1857 while i < len(binchunk):
1858 while i < len(binchunk):
1858 cmd = ord(binchunk[i])
1859 cmd = ord(binchunk[i])
1859 i += 1
1860 i += 1
1860 if (cmd & 0x80):
1861 if (cmd & 0x80):
1861 offset = 0
1862 offset = 0
1862 size = 0
1863 size = 0
1863 if (cmd & 0x01):
1864 if (cmd & 0x01):
1864 offset = ord(binchunk[i])
1865 offset = ord(binchunk[i])
1865 i += 1
1866 i += 1
1866 if (cmd & 0x02):
1867 if (cmd & 0x02):
1867 offset |= ord(binchunk[i]) << 8
1868 offset |= ord(binchunk[i]) << 8
1868 i += 1
1869 i += 1
1869 if (cmd & 0x04):
1870 if (cmd & 0x04):
1870 offset |= ord(binchunk[i]) << 16
1871 offset |= ord(binchunk[i]) << 16
1871 i += 1
1872 i += 1
1872 if (cmd & 0x08):
1873 if (cmd & 0x08):
1873 offset |= ord(binchunk[i]) << 24
1874 offset |= ord(binchunk[i]) << 24
1874 i += 1
1875 i += 1
1875 if (cmd & 0x10):
1876 if (cmd & 0x10):
1876 size = ord(binchunk[i])
1877 size = ord(binchunk[i])
1877 i += 1
1878 i += 1
1878 if (cmd & 0x20):
1879 if (cmd & 0x20):
1879 size |= ord(binchunk[i]) << 8
1880 size |= ord(binchunk[i]) << 8
1880 i += 1
1881 i += 1
1881 if (cmd & 0x40):
1882 if (cmd & 0x40):
1882 size |= ord(binchunk[i]) << 16
1883 size |= ord(binchunk[i]) << 16
1883 i += 1
1884 i += 1
1884 if size == 0:
1885 if size == 0:
1885 size = 0x10000
1886 size = 0x10000
1886 offset_end = offset + size
1887 offset_end = offset + size
1887 out += data[offset:offset_end]
1888 out += data[offset:offset_end]
1888 elif cmd != 0:
1889 elif cmd != 0:
1889 offset_end = i + cmd
1890 offset_end = i + cmd
1890 out += binchunk[i:offset_end]
1891 out += binchunk[i:offset_end]
1891 i += cmd
1892 i += cmd
1892 else:
1893 else:
1893 raise PatchError(_('unexpected delta opcode 0'))
1894 raise PatchError(_('unexpected delta opcode 0'))
1894 return out
1895 return out
1895
1896
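A tiny worked example of that delta format may help (hand-built bytes in Python 2 str form, assuming the usual git patch-delta encoding rather than a delta actually emitted by git): two size varints come first, then either literal-insert commands (high bit clear, the low bits give the byte count) or copy commands (high bit set, the low bits say which offset/size bytes follow).

    source = 'hello world'
    delta = ('\x0b'        # source size = 11 (single varint byte)
             '\x08'        # target size = 8
             '\x03' 'hi '  # insert the next 3 literal bytes
             '\x91'        # copy: 0x80 | 0x01 (offset byte) | 0x10 (size byte)
             '\x06'        # offset = 6
             '\x05')       # size = 5, so source[6:11] == 'world' is copied
    assert applybindelta(delta, source) == 'hi world'
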
1896 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1897 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1897 """Reads a patch from fp and tries to apply it.
1898 """Reads a patch from fp and tries to apply it.
1898
1899
1899 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1900 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1900 there was any fuzz.
1901 there was any fuzz.
1901
1902
1902 If 'eolmode' is 'strict', the patch content and patched file are
1903 If 'eolmode' is 'strict', the patch content and patched file are
1903 read in binary mode. Otherwise, line endings are ignored when
1904 read in binary mode. Otherwise, line endings are ignored when
1904 patching then normalized according to 'eolmode'.
1905 patching then normalized according to 'eolmode'.
1905 """
1906 """
1906 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1907 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1907 prefix=prefix, eolmode=eolmode)
1908 prefix=prefix, eolmode=eolmode)
1908
1909
1909 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1910 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1910 eolmode='strict'):
1911 eolmode='strict'):
1911
1912
1912 if prefix:
1913 if prefix:
1913 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1914 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1914 prefix)
1915 prefix)
1915 if prefix != '':
1916 if prefix != '':
1916 prefix += '/'
1917 prefix += '/'
1917 def pstrip(p):
1918 def pstrip(p):
1918 return pathtransform(p, strip - 1, prefix)[1]
1919 return pathtransform(p, strip - 1, prefix)[1]
1919
1920
1920 rejects = 0
1921 rejects = 0
1921 err = 0
1922 err = 0
1922 current_file = None
1923 current_file = None
1923
1924
1924 for state, values in iterhunks(fp):
1925 for state, values in iterhunks(fp):
1925 if state == 'hunk':
1926 if state == 'hunk':
1926 if not current_file:
1927 if not current_file:
1927 continue
1928 continue
1928 ret = current_file.apply(values)
1929 ret = current_file.apply(values)
1929 if ret > 0:
1930 if ret > 0:
1930 err = 1
1931 err = 1
1931 elif state == 'file':
1932 elif state == 'file':
1932 if current_file:
1933 if current_file:
1933 rejects += current_file.close()
1934 rejects += current_file.close()
1934 current_file = None
1935 current_file = None
1935 afile, bfile, first_hunk, gp = values
1936 afile, bfile, first_hunk, gp = values
1936 if gp:
1937 if gp:
1937 gp.path = pstrip(gp.path)
1938 gp.path = pstrip(gp.path)
1938 if gp.oldpath:
1939 if gp.oldpath:
1939 gp.oldpath = pstrip(gp.oldpath)
1940 gp.oldpath = pstrip(gp.oldpath)
1940 else:
1941 else:
1941 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1942 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1942 prefix)
1943 prefix)
1943 if gp.op == 'RENAME':
1944 if gp.op == 'RENAME':
1944 backend.unlink(gp.oldpath)
1945 backend.unlink(gp.oldpath)
1945 if not first_hunk:
1946 if not first_hunk:
1946 if gp.op == 'DELETE':
1947 if gp.op == 'DELETE':
1947 backend.unlink(gp.path)
1948 backend.unlink(gp.path)
1948 continue
1949 continue
1949 data, mode = None, None
1950 data, mode = None, None
1950 if gp.op in ('RENAME', 'COPY'):
1951 if gp.op in ('RENAME', 'COPY'):
1951 data, mode = store.getfile(gp.oldpath)[:2]
1952 data, mode = store.getfile(gp.oldpath)[:2]
1952 # FIXME: failing getfile has never been handled here
1953 # FIXME: failing getfile has never been handled here
1953 assert data is not None
1954 assert data is not None
1954 if gp.mode:
1955 if gp.mode:
1955 mode = gp.mode
1956 mode = gp.mode
1956 if gp.op == 'ADD':
1957 if gp.op == 'ADD':
1957 # Added files without content have no hunk and
1958 # Added files without content have no hunk and
1958 # must be created
1959 # must be created
1959 data = ''
1960 data = ''
1960 if data or mode:
1961 if data or mode:
1961 if (gp.op in ('ADD', 'RENAME', 'COPY')
1962 if (gp.op in ('ADD', 'RENAME', 'COPY')
1962 and backend.exists(gp.path)):
1963 and backend.exists(gp.path)):
1963 raise PatchError(_("cannot create %s: destination "
1964 raise PatchError(_("cannot create %s: destination "
1964 "already exists") % gp.path)
1965 "already exists") % gp.path)
1965 backend.setfile(gp.path, data, mode, gp.oldpath)
1966 backend.setfile(gp.path, data, mode, gp.oldpath)
1966 continue
1967 continue
1967 try:
1968 try:
1968 current_file = patcher(ui, gp, backend, store,
1969 current_file = patcher(ui, gp, backend, store,
1969 eolmode=eolmode)
1970 eolmode=eolmode)
1970 except PatchError as inst:
1971 except PatchError as inst:
1971 ui.warn(str(inst) + '\n')
1972 ui.warn(str(inst) + '\n')
1972 current_file = None
1973 current_file = None
1973 rejects += 1
1974 rejects += 1
1974 continue
1975 continue
1975 elif state == 'git':
1976 elif state == 'git':
1976 for gp in values:
1977 for gp in values:
1977 path = pstrip(gp.oldpath)
1978 path = pstrip(gp.oldpath)
1978 data, mode = backend.getfile(path)
1979 data, mode = backend.getfile(path)
1979 if data is None:
1980 if data is None:
1980 # The error ignored here will trigger a getfile()
1981 # The error ignored here will trigger a getfile()
1981 # error in a place more appropriate for error
1982 # error in a place more appropriate for error
1982 # handling, and will not interrupt the patching
1983 # handling, and will not interrupt the patching
1983 # process.
1984 # process.
1984 pass
1985 pass
1985 else:
1986 else:
1986 store.setfile(path, data, mode)
1987 store.setfile(path, data, mode)
1987 else:
1988 else:
1988 raise error.Abort(_('unsupported parser state: %s') % state)
1989 raise error.Abort(_('unsupported parser state: %s') % state)
1989
1990
1990 if current_file:
1991 if current_file:
1991 rejects += current_file.close()
1992 rejects += current_file.close()
1992
1993
1993 if rejects:
1994 if rejects:
1994 return -1
1995 return -1
1995 return err
1996 return err
1996
1997
1997 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1998 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1998 similarity):
1999 similarity):
1999 """use <patcher> to apply <patchname> to the working directory.
2000 """use <patcher> to apply <patchname> to the working directory.
2000 returns whether patch was applied with fuzz factor."""
2001 returns whether patch was applied with fuzz factor."""
2001
2002
2002 fuzz = False
2003 fuzz = False
2003 args = []
2004 args = []
2004 cwd = repo.root
2005 cwd = repo.root
2005 if cwd:
2006 if cwd:
2006 args.append('-d %s' % util.shellquote(cwd))
2007 args.append('-d %s' % util.shellquote(cwd))
2007 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2008 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2008 util.shellquote(patchname)))
2009 util.shellquote(patchname)))
2009 try:
2010 try:
2010 for line in fp:
2011 for line in fp:
2011 line = line.rstrip()
2012 line = line.rstrip()
2012 ui.note(line + '\n')
2013 ui.note(line + '\n')
2013 if line.startswith('patching file '):
2014 if line.startswith('patching file '):
2014 pf = util.parsepatchoutput(line)
2015 pf = util.parsepatchoutput(line)
2015 printed_file = False
2016 printed_file = False
2016 files.add(pf)
2017 files.add(pf)
2017 elif line.find('with fuzz') >= 0:
2018 elif line.find('with fuzz') >= 0:
2018 fuzz = True
2019 fuzz = True
2019 if not printed_file:
2020 if not printed_file:
2020 ui.warn(pf + '\n')
2021 ui.warn(pf + '\n')
2021 printed_file = True
2022 printed_file = True
2022 ui.warn(line + '\n')
2023 ui.warn(line + '\n')
2023 elif line.find('saving rejects to file') >= 0:
2024 elif line.find('saving rejects to file') >= 0:
2024 ui.warn(line + '\n')
2025 ui.warn(line + '\n')
2025 elif line.find('FAILED') >= 0:
2026 elif line.find('FAILED') >= 0:
2026 if not printed_file:
2027 if not printed_file:
2027 ui.warn(pf + '\n')
2028 ui.warn(pf + '\n')
2028 printed_file = True
2029 printed_file = True
2029 ui.warn(line + '\n')
2030 ui.warn(line + '\n')
2030 finally:
2031 finally:
2031 if files:
2032 if files:
2032 scmutil.marktouched(repo, files, similarity)
2033 scmutil.marktouched(repo, files, similarity)
2033 code = fp.close()
2034 code = fp.close()
2034 if code:
2035 if code:
2035 raise PatchError(_("patch command failed: %s") %
2036 raise PatchError(_("patch command failed: %s") %
2036 util.explainexit(code)[0])
2037 util.explainexit(code)[0])
2037 return fuzz
2038 return fuzz
2038
2039
2039 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2040 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2040 eolmode='strict'):
2041 eolmode='strict'):
2041 if files is None:
2042 if files is None:
2042 files = set()
2043 files = set()
2043 if eolmode is None:
2044 if eolmode is None:
2044 eolmode = ui.config('patch', 'eol', 'strict')
2045 eolmode = ui.config('patch', 'eol', 'strict')
2045 if eolmode.lower() not in eolmodes:
2046 if eolmode.lower() not in eolmodes:
2046 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2047 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2047 eolmode = eolmode.lower()
2048 eolmode = eolmode.lower()
2048
2049
2049 store = filestore()
2050 store = filestore()
2050 try:
2051 try:
2051 fp = open(patchobj, 'rb')
2052 fp = open(patchobj, 'rb')
2052 except TypeError:
2053 except TypeError:
2053 fp = patchobj
2054 fp = patchobj
2054 try:
2055 try:
2055 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2056 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2056 eolmode=eolmode)
2057 eolmode=eolmode)
2057 finally:
2058 finally:
2058 if fp != patchobj:
2059 if fp != patchobj:
2059 fp.close()
2060 fp.close()
2060 files.update(backend.close())
2061 files.update(backend.close())
2061 store.close()
2062 store.close()
2062 if ret < 0:
2063 if ret < 0:
2063 raise PatchError(_('patch failed to apply'))
2064 raise PatchError(_('patch failed to apply'))
2064 return ret > 0
2065 return ret > 0
2065
2066
2066 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2067 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2067 eolmode='strict', similarity=0):
2068 eolmode='strict', similarity=0):
2068 """use builtin patch to apply <patchobj> to the working directory.
2069 """use builtin patch to apply <patchobj> to the working directory.
2069 returns whether patch was applied with fuzz factor."""
2070 returns whether patch was applied with fuzz factor."""
2070 backend = workingbackend(ui, repo, similarity)
2071 backend = workingbackend(ui, repo, similarity)
2071 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2072 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2072
2073
2073 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2074 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2074 eolmode='strict'):
2075 eolmode='strict'):
2075 backend = repobackend(ui, repo, ctx, store)
2076 backend = repobackend(ui, repo, ctx, store)
2076 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2077
2078
2078 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2079 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2079 similarity=0):
2080 similarity=0):
2080 """Apply <patchname> to the working directory.
2081 """Apply <patchname> to the working directory.
2081
2082
2082 'eolmode' specifies how end of lines should be handled. It can be:
2083 'eolmode' specifies how end of lines should be handled. It can be:
2083 - 'strict': inputs are read in binary mode, EOLs are preserved
2084 - 'strict': inputs are read in binary mode, EOLs are preserved
2084 - 'crlf': EOLs are ignored when patching and reset to CRLF
2085 - 'crlf': EOLs are ignored when patching and reset to CRLF
2085 - 'lf': EOLs are ignored when patching and reset to LF
2086 - 'lf': EOLs are ignored when patching and reset to LF
2086 - None: get it from user settings, default to 'strict'
2087 - None: get it from user settings, default to 'strict'
2087 'eolmode' is ignored when using an external patcher program.
2088 'eolmode' is ignored when using an external patcher program.
2088
2089
2089 Returns whether patch was applied with fuzz factor.
2090 Returns whether patch was applied with fuzz factor.
2090 """
2091 """
2091 patcher = ui.config('ui', 'patch')
2092 patcher = ui.config('ui', 'patch')
2092 if files is None:
2093 if files is None:
2093 files = set()
2094 files = set()
2094 if patcher:
2095 if patcher:
2095 return _externalpatch(ui, repo, patcher, patchname, strip,
2096 return _externalpatch(ui, repo, patcher, patchname, strip,
2096 files, similarity)
2097 files, similarity)
2097 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2098 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2098 similarity)
2099 similarity)
2099
2100
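Both configuration knobs read on this code path live in an hgrc; a minimal sketch (the values are illustrative, not recommendations):

    [ui]
    # when set, patch() hands the work to this external program
    # via _externalpatch() instead of using the builtin patcher
    patch = gpatch

    [patch]
    # consulted by patchbackend() when eolmode is None;
    # the builtin patcher accepts strict, crlf or lf (see patch() above)
    eol = strict
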
2100 def changedfiles(ui, repo, patchpath, strip=1):
2101 def changedfiles(ui, repo, patchpath, strip=1):
2101 backend = fsbackend(ui, repo.root)
2102 backend = fsbackend(ui, repo.root)
2102 with open(patchpath, 'rb') as fp:
2103 with open(patchpath, 'rb') as fp:
2103 changed = set()
2104 changed = set()
2104 for state, values in iterhunks(fp):
2105 for state, values in iterhunks(fp):
2105 if state == 'file':
2106 if state == 'file':
2106 afile, bfile, first_hunk, gp = values
2107 afile, bfile, first_hunk, gp = values
2107 if gp:
2108 if gp:
2108 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2109 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2109 if gp.oldpath:
2110 if gp.oldpath:
2110 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2111 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2111 else:
2112 else:
2112 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2113 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2113 '')
2114 '')
2114 changed.add(gp.path)
2115 changed.add(gp.path)
2115 if gp.op == 'RENAME':
2116 if gp.op == 'RENAME':
2116 changed.add(gp.oldpath)
2117 changed.add(gp.oldpath)
2117 elif state not in ('hunk', 'git'):
2118 elif state not in ('hunk', 'git'):
2118 raise error.Abort(_('unsupported parser state: %s') % state)
2119 raise error.Abort(_('unsupported parser state: %s') % state)
2119 return changed
2120 return changed
2120
2121
2121 class GitDiffRequired(Exception):
2122 class GitDiffRequired(Exception):
2122 pass
2123 pass
2123
2124
2124 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2125 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2125 '''return diffopts with all features supported and parsed'''
2126 '''return diffopts with all features supported and parsed'''
2126 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2127 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2127 git=True, whitespace=True, formatchanging=True)
2128 git=True, whitespace=True, formatchanging=True)
2128
2129
2129 diffopts = diffallopts
2130 diffopts = diffallopts
2130
2131
2131 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2132 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2132 whitespace=False, formatchanging=False):
2133 whitespace=False, formatchanging=False):
2133 '''return diffopts with only opted-in features parsed
2134 '''return diffopts with only opted-in features parsed
2134
2135
2135 Features:
2136 Features:
2136 - git: git-style diffs
2137 - git: git-style diffs
2137 - whitespace: whitespace options like ignoreblanklines and ignorews
2138 - whitespace: whitespace options like ignoreblanklines and ignorews
2138 - formatchanging: options that will likely break or cause correctness issues
2139 - formatchanging: options that will likely break or cause correctness issues
2139 with most diff parsers
2140 with most diff parsers
2140 '''
2141 '''
2141 def get(key, name=None, getter=ui.configbool, forceplain=None):
2142 def get(key, name=None, getter=ui.configbool, forceplain=None):
2142 if opts:
2143 if opts:
2143 v = opts.get(key)
2144 v = opts.get(key)
2144 if v:
2145 if v:
2145 return v
2146 return v
2146 if forceplain is not None and ui.plain():
2147 if forceplain is not None and ui.plain():
2147 return forceplain
2148 return forceplain
2148 return getter(section, name or key, None, untrusted=untrusted)
2149 return getter(section, name or key, None, untrusted=untrusted)
2149
2150
2150 # core options, expected to be understood by every diff parser
2151 # core options, expected to be understood by every diff parser
2151 buildopts = {
2152 buildopts = {
2152 'nodates': get('nodates'),
2153 'nodates': get('nodates'),
2153 'showfunc': get('show_function', 'showfunc'),
2154 'showfunc': get('show_function', 'showfunc'),
2154 'context': get('unified', getter=ui.config),
2155 'context': get('unified', getter=ui.config),
2155 }
2156 }
2156
2157
2157 if git:
2158 if git:
2158 buildopts['git'] = get('git')
2159 buildopts['git'] = get('git')
2159 if whitespace:
2160 if whitespace:
2160 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2161 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2161 buildopts['ignorewsamount'] = get('ignore_space_change',
2162 buildopts['ignorewsamount'] = get('ignore_space_change',
2162 'ignorewsamount')
2163 'ignorewsamount')
2163 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2164 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2164 'ignoreblanklines')
2165 'ignoreblanklines')
2165 if formatchanging:
2166 if formatchanging:
2166 buildopts['text'] = opts and opts.get('text')
2167 buildopts['text'] = opts and opts.get('text')
2167 buildopts['nobinary'] = get('nobinary', forceplain=False)
2168 buildopts['nobinary'] = get('nobinary', forceplain=False)
2168 buildopts['noprefix'] = get('noprefix', forceplain=False)
2169 buildopts['noprefix'] = get('noprefix', forceplain=False)
2169
2170
2170 return mdiff.diffopts(**buildopts)
2171 return mdiff.diffopts(**buildopts)
2171
2172
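A short usage sketch (hypothetical caller code, not part of this module): a caller that wants git-style diffs and the whitespace options, but none of the format-changing ones, asks for exactly those feature groups and feeds the result to diff():

    # 'cmdopts' stands in for whatever option dict the caller already has
    diffopts = difffeatureopts(ui, opts=cmdopts, git=True, whitespace=True)
    for chunk in diff(repo, node1, node2, opts=diffopts):
        ui.write(chunk)
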
2172 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2173 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2173 losedatafn=None, prefix='', relroot=''):
2174 losedatafn=None, prefix='', relroot=''):
2174 '''yields diff of changes to files between two nodes, or node and
2175 '''yields diff of changes to files between two nodes, or node and
2175 working directory.
2176 working directory.
2176
2177
2177 if node1 is None, use first dirstate parent instead.
2178 if node1 is None, use first dirstate parent instead.
2178 if node2 is None, compare node1 with working directory.
2179 if node2 is None, compare node1 with working directory.
2179
2180
2180 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2181 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2181 every time some change cannot be represented with the current
2182 every time some change cannot be represented with the current
2182 patch format. Return False to upgrade to git patch format, True to
2183 patch format. Return False to upgrade to git patch format, True to
2183 accept the loss or raise an exception to abort the diff. It is
2184 accept the loss or raise an exception to abort the diff. It is
2184 called with the name of current file being diffed as 'fn'. If set
2185 called with the name of current file being diffed as 'fn'. If set
2185 to None, patches will always be upgraded to git format when
2186 to None, patches will always be upgraded to git format when
2186 necessary.
2187 necessary.
2187
2188
2188 prefix is a filename prefix that is prepended to all filenames on
2189 prefix is a filename prefix that is prepended to all filenames on
2189 display (used for subrepos).
2190 display (used for subrepos).
2190
2191
2191 relroot, if not empty, must be normalized with a trailing /. Any match
2192 relroot, if not empty, must be normalized with a trailing /. Any match
2192 patterns that fall outside it will be ignored.'''
2193 patterns that fall outside it will be ignored.'''
2193
2194
2194 if opts is None:
2195 if opts is None:
2195 opts = mdiff.defaultopts
2196 opts = mdiff.defaultopts
2196
2197
2197 if not node1 and not node2:
2198 if not node1 and not node2:
2198 node1 = repo.dirstate.p1()
2199 node1 = repo.dirstate.p1()
2199
2200
2200 def lrugetfilectx():
2201 def lrugetfilectx():
2201 cache = {}
2202 cache = {}
2202 order = collections.deque()
2203 order = collections.deque()
2203 def getfilectx(f, ctx):
2204 def getfilectx(f, ctx):
2204 fctx = ctx.filectx(f, filelog=cache.get(f))
2205 fctx = ctx.filectx(f, filelog=cache.get(f))
2205 if f not in cache:
2206 if f not in cache:
2206 if len(cache) > 20:
2207 if len(cache) > 20:
2207 del cache[order.popleft()]
2208 del cache[order.popleft()]
2208 cache[f] = fctx.filelog()
2209 cache[f] = fctx.filelog()
2209 else:
2210 else:
2210 order.remove(f)
2211 order.remove(f)
2211 order.append(f)
2212 order.append(f)
2212 return fctx
2213 return fctx
2213 return getfilectx
2214 return getfilectx
2214 getfilectx = lrugetfilectx()
2215 getfilectx = lrugetfilectx()
2215
2216
2216 ctx1 = repo[node1]
2217 ctx1 = repo[node1]
2217 ctx2 = repo[node2]
2218 ctx2 = repo[node2]
2218
2219
2219 relfiltered = False
2220 relfiltered = False
2220 if relroot != '' and match.always():
2221 if relroot != '' and match.always():
2221 # as a special case, create a new matcher with just the relroot
2222 # as a special case, create a new matcher with just the relroot
2222 pats = [relroot]
2223 pats = [relroot]
2223 match = scmutil.match(ctx2, pats, default='path')
2224 match = scmutil.match(ctx2, pats, default='path')
2224 relfiltered = True
2225 relfiltered = True
2225
2226
2226 if not changes:
2227 if not changes:
2227 changes = repo.status(ctx1, ctx2, match=match)
2228 changes = repo.status(ctx1, ctx2, match=match)
2228 modified, added, removed = changes[:3]
2229 modified, added, removed = changes[:3]
2229
2230
2230 if not modified and not added and not removed:
2231 if not modified and not added and not removed:
2231 return []
2232 return []
2232
2233
2233 if repo.ui.debugflag:
2234 if repo.ui.debugflag:
2234 hexfunc = hex
2235 hexfunc = hex
2235 else:
2236 else:
2236 hexfunc = short
2237 hexfunc = short
2237 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2238 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2238
2239
2239 copy = {}
2240 copy = {}
2240 if opts.git or opts.upgrade:
2241 if opts.git or opts.upgrade:
2241 copy = copies.pathcopies(ctx1, ctx2, match=match)
2242 copy = copies.pathcopies(ctx1, ctx2, match=match)
2242
2243
2243 if relroot is not None:
2244 if relroot is not None:
2244 if not relfiltered:
2245 if not relfiltered:
2245 # XXX this would ideally be done in the matcher, but that is
2246 # XXX this would ideally be done in the matcher, but that is
2246 # generally meant to 'or' patterns, not 'and' them. In this case we
2247 # generally meant to 'or' patterns, not 'and' them. In this case we
2247 # need to 'and' all the patterns from the matcher with relroot.
2248 # need to 'and' all the patterns from the matcher with relroot.
2248 def filterrel(l):
2249 def filterrel(l):
2249 return [f for f in l if f.startswith(relroot)]
2250 return [f for f in l if f.startswith(relroot)]
2250 modified = filterrel(modified)
2251 modified = filterrel(modified)
2251 added = filterrel(added)
2252 added = filterrel(added)
2252 removed = filterrel(removed)
2253 removed = filterrel(removed)
2253 relfiltered = True
2254 relfiltered = True
2254 # filter out copies where either side isn't inside the relative root
2255 # filter out copies where either side isn't inside the relative root
2255 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2256 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2256 if dst.startswith(relroot)
2257 if dst.startswith(relroot)
2257 and src.startswith(relroot)))
2258 and src.startswith(relroot)))
2258
2259
2259 modifiedset = set(modified)
2260 modifiedset = set(modified)
2260 addedset = set(added)
2261 addedset = set(added)
2261 removedset = set(removed)
2262 removedset = set(removed)
2262 for f in modified:
2263 for f in modified:
2263 if f not in ctx1:
2264 if f not in ctx1:
2264 # Fix up added, since merged-in additions appear as
2265 # Fix up added, since merged-in additions appear as
2265 # modifications during merges
2266 # modifications during merges
2266 modifiedset.remove(f)
2267 modifiedset.remove(f)
2267 addedset.add(f)
2268 addedset.add(f)
2268 for f in removed:
2269 for f in removed:
2269 if f not in ctx1:
2270 if f not in ctx1:
2270 # Merged-in additions that are then removed are reported as removed.
2271 # Merged-in additions that are then removed are reported as removed.
2271 # They are not in ctx1, so we don't want to show them in the diff.
2272 # They are not in ctx1, so we don't want to show them in the diff.
2272 removedset.remove(f)
2273 removedset.remove(f)
2273 modified = sorted(modifiedset)
2274 modified = sorted(modifiedset)
2274 added = sorted(addedset)
2275 added = sorted(addedset)
2275 removed = sorted(removedset)
2276 removed = sorted(removedset)
2276 for dst, src in copy.items():
2277 for dst, src in copy.items():
2277 if src not in ctx1:
2278 if src not in ctx1:
2278 # Files merged in during a merge and then copied/renamed are
2279 # Files merged in during a merge and then copied/renamed are
2279 # reported as copies. We want to show them in the diff as additions.
2280 # reported as copies. We want to show them in the diff as additions.
2280 del copy[dst]
2281 del copy[dst]
2281
2282
2282 def difffn(opts, losedata):
2283 def difffn(opts, losedata):
2283 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2284 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2284 copy, getfilectx, opts, losedata, prefix, relroot)
2285 copy, getfilectx, opts, losedata, prefix, relroot)
2285 if opts.upgrade and not opts.git:
2286 if opts.upgrade and not opts.git:
2286 try:
2287 try:
2287 def losedata(fn):
2288 def losedata(fn):
2288 if not losedatafn or not losedatafn(fn=fn):
2289 if not losedatafn or not losedatafn(fn=fn):
2289 raise GitDiffRequired
2290 raise GitDiffRequired
2290 # Buffer the whole output until we are sure it can be generated
2291 # Buffer the whole output until we are sure it can be generated
2291 return list(difffn(opts.copy(git=False), losedata))
2292 return list(difffn(opts.copy(git=False), losedata))
2292 except GitDiffRequired:
2293 except GitDiffRequired:
2293 return difffn(opts.copy(git=True), None)
2294 return difffn(opts.copy(git=True), None)
2294 else:
2295 else:
2295 return difffn(opts, None)
2296 return difffn(opts, None)
2296
2297
2297 def difflabel(func, *args, **kw):
2298 def difflabel(func, *args, **kw):
2298 '''yields 2-tuples of (output, label) based on the output of func()'''
2299 '''yields 2-tuples of (output, label) based on the output of func()'''
2299 headprefixes = [('diff', 'diff.diffline'),
2300 headprefixes = [('diff', 'diff.diffline'),
2300 ('copy', 'diff.extended'),
2301 ('copy', 'diff.extended'),
2301 ('rename', 'diff.extended'),
2302 ('rename', 'diff.extended'),
2302 ('old', 'diff.extended'),
2303 ('old', 'diff.extended'),
2303 ('new', 'diff.extended'),
2304 ('new', 'diff.extended'),
2304 ('deleted', 'diff.extended'),
2305 ('deleted', 'diff.extended'),
2305 ('---', 'diff.file_a'),
2306 ('---', 'diff.file_a'),
2306 ('+++', 'diff.file_b')]
2307 ('+++', 'diff.file_b')]
2307 textprefixes = [('@', 'diff.hunk'),
2308 textprefixes = [('@', 'diff.hunk'),
2308 ('-', 'diff.deleted'),
2309 ('-', 'diff.deleted'),
2309 ('+', 'diff.inserted')]
2310 ('+', 'diff.inserted')]
2310 head = False
2311 head = False
2311 for chunk in func(*args, **kw):
2312 for chunk in func(*args, **kw):
2312 lines = chunk.split('\n')
2313 lines = chunk.split('\n')
2313 for i, line in enumerate(lines):
2314 for i, line in enumerate(lines):
2314 if i != 0:
2315 if i != 0:
2315 yield ('\n', '')
2316 yield ('\n', '')
2316 if head:
2317 if head:
2317 if line.startswith('@'):
2318 if line.startswith('@'):
2318 head = False
2319 head = False
2319 else:
2320 else:
2320 if line and line[0] not in ' +-@\\':
2321 if line and line[0] not in ' +-@\\':
2321 head = True
2322 head = True
2322 stripline = line
2323 stripline = line
2323 diffline = False
2324 diffline = False
2324 if not head and line and line[0] in '+-':
2325 if not head and line and line[0] in '+-':
2325 # highlight tabs and trailing whitespace, but only in
2326 # highlight tabs and trailing whitespace, but only in
2326 # changed lines
2327 # changed lines
2327 stripline = line.rstrip()
2328 stripline = line.rstrip()
2328 diffline = True
2329 diffline = True
2329
2330
2330 prefixes = textprefixes
2331 prefixes = textprefixes
2331 if head:
2332 if head:
2332 prefixes = headprefixes
2333 prefixes = headprefixes
2333 for prefix, label in prefixes:
2334 for prefix, label in prefixes:
2334 if stripline.startswith(prefix):
2335 if stripline.startswith(prefix):
2335 if diffline:
2336 if diffline:
2336 for token in tabsplitter.findall(stripline):
2337 for token in tabsplitter.findall(stripline):
2337 if '\t' == token[0]:
2338 if '\t' == token[0]:
2338 yield (token, 'diff.tab')
2339 yield (token, 'diff.tab')
2339 else:
2340 else:
2340 yield (token, label)
2341 yield (token, label)
2341 else:
2342 else:
2342 yield (stripline, label)
2343 yield (stripline, label)
2343 break
2344 break
2344 else:
2345 else:
2345 yield (line, '')
2346 yield (line, '')
2346 if line != stripline:
2347 if line != stripline:
2347 yield (line[len(stripline):], 'diff.trailingwhitespace')
2348 yield (line[len(stripline):], 'diff.trailingwhitespace')
2348
2349
2349 def diffui(*args, **kw):
2350 def diffui(*args, **kw):
2350 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2351 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2351 return difflabel(diff, *args, **kw)
2352 return difflabel(diff, *args, **kw)
2352
2353
2353 def _filepairs(modified, added, removed, copy, opts):
2354 def _filepairs(modified, added, removed, copy, opts):
2354 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2355 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2355 before and f2 is the name after. For added files, f1 will be None,
2356 before and f2 is the name after. For added files, f1 will be None,
2356 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2357 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2357 or 'rename' (the latter two only if opts.git is set).'''
2358 or 'rename' (the latter two only if opts.git is set).'''
2358 gone = set()
2359 gone = set()
2359
2360
2360 copyto = dict([(v, k) for k, v in copy.items()])
2361 copyto = dict([(v, k) for k, v in copy.items()])
2361
2362
2362 addedset, removedset = set(added), set(removed)
2363 addedset, removedset = set(added), set(removed)
2363
2364
2364 for f in sorted(modified + added + removed):
2365 for f in sorted(modified + added + removed):
2365 copyop = None
2366 copyop = None
2366 f1, f2 = f, f
2367 f1, f2 = f, f
2367 if f in addedset:
2368 if f in addedset:
2368 f1 = None
2369 f1 = None
2369 if f in copy:
2370 if f in copy:
2370 if opts.git:
2371 if opts.git:
2371 f1 = copy[f]
2372 f1 = copy[f]
2372 if f1 in removedset and f1 not in gone:
2373 if f1 in removedset and f1 not in gone:
2373 copyop = 'rename'
2374 copyop = 'rename'
2374 gone.add(f1)
2375 gone.add(f1)
2375 else:
2376 else:
2376 copyop = 'copy'
2377 copyop = 'copy'
2377 elif f in removedset:
2378 elif f in removedset:
2378 f2 = None
2379 f2 = None
2379 if opts.git:
2380 if opts.git:
2380 # have we already reported a copy above?
2381 # have we already reported a copy above?
2381 if (f in copyto and copyto[f] in addedset
2382 if (f in copyto and copyto[f] in addedset
2382 and copy[copyto[f]] == f):
2383 and copy[copyto[f]] == f):
2383 continue
2384 continue
2384 yield f1, f2, copyop
2385 yield f1, f2, copyop
2385
2386
2386 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2387 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2387 copy, getfilectx, opts, losedatafn, prefix, relroot):
2388 copy, getfilectx, opts, losedatafn, prefix, relroot):
2388 '''given input data, generate a diff and yield it in blocks
2389 '''given input data, generate a diff and yield it in blocks
2389
2390
2390 If generating a diff would lose data like flags or binary data and
2391 If generating a diff would lose data like flags or binary data and
2391 losedatafn is not None, it will be called.
2392 losedatafn is not None, it will be called.
2392
2393
2393 relroot is removed and prefix is added to every path in the diff output.
2394 relroot is removed and prefix is added to every path in the diff output.
2394
2395
2395 If relroot is not empty, this function expects every path in modified,
2396 If relroot is not empty, this function expects every path in modified,
2396 added, removed and copy to start with it.'''
2397 added, removed and copy to start with it.'''
2397
2398
2398 def gitindex(text):
2399 def gitindex(text):
2399 if not text:
2400 if not text:
2400 text = ""
2401 text = ""
2401 l = len(text)
2402 l = len(text)
2402 s = util.sha1('blob %d\0' % l)
2403 s = util.sha1('blob %d\0' % l)
2403 s.update(text)
2404 s.update(text)
2404 return s.hexdigest()
2405 return s.hexdigest()
2405
2406
2406 if opts.noprefix:
2407 if opts.noprefix:
2407 aprefix = bprefix = ''
2408 aprefix = bprefix = ''
2408 else:
2409 else:
2409 aprefix = 'a/'
2410 aprefix = 'a/'
2410 bprefix = 'b/'
2411 bprefix = 'b/'
2411
2412
2412 def diffline(f, revs):
2413 def diffline(f, revs):
2413 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2414 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2414 return 'diff %s %s' % (revinfo, f)
2415 return 'diff %s %s' % (revinfo, f)
2415
2416
2416 date1 = util.datestr(ctx1.date())
2417 date1 = util.datestr(ctx1.date())
2417 date2 = util.datestr(ctx2.date())
2418 date2 = util.datestr(ctx2.date())
2418
2419
2419 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2420 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2420
2421
2421 if relroot != '' and (repo.ui.configbool('devel', 'all')
2422 if relroot != '' and (repo.ui.configbool('devel', 'all')
2422 or repo.ui.configbool('devel', 'check-relroot')):
2423 or repo.ui.configbool('devel', 'check-relroot')):
2423 for f in modified + added + removed + copy.keys() + copy.values():
2424 for f in modified + added + removed + copy.keys() + copy.values():
2424 if f is not None and not f.startswith(relroot):
2425 if f is not None and not f.startswith(relroot):
2425 raise AssertionError(
2426 raise AssertionError(
2426 "file %s doesn't start with relroot %s" % (f, relroot))
2427 "file %s doesn't start with relroot %s" % (f, relroot))
2427
2428
2428 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2429 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2429 content1 = None
2430 content1 = None
2430 content2 = None
2431 content2 = None
2431 flag1 = None
2432 flag1 = None
2432 flag2 = None
2433 flag2 = None
2433 if f1:
2434 if f1:
2434 content1 = getfilectx(f1, ctx1).data()
2435 content1 = getfilectx(f1, ctx1).data()
2435 if opts.git or losedatafn:
2436 if opts.git or losedatafn:
2436 flag1 = ctx1.flags(f1)
2437 flag1 = ctx1.flags(f1)
2437 if f2:
2438 if f2:
2438 content2 = getfilectx(f2, ctx2).data()
2439 content2 = getfilectx(f2, ctx2).data()
2439 if opts.git or losedatafn:
2440 if opts.git or losedatafn:
2440 flag2 = ctx2.flags(f2)
2441 flag2 = ctx2.flags(f2)
2441 binary = False
2442 binary = False
2442 if opts.git or losedatafn:
2443 if opts.git or losedatafn:
2443 binary = util.binary(content1) or util.binary(content2)
2444 binary = util.binary(content1) or util.binary(content2)
2444
2445
2445 if losedatafn and not opts.git:
2446 if losedatafn and not opts.git:
2446 if (binary or
2447 if (binary or
2447 # copy/rename
2448 # copy/rename
2448 f2 in copy or
2449 f2 in copy or
2449 # empty file creation
2450 # empty file creation
2450 (not f1 and not content2) or
2451 (not f1 and not content2) or
2451 # empty file deletion
2452 # empty file deletion
2452 (not content1 and not f2) or
2453 (not content1 and not f2) or
2453 # create with flags
2454 # create with flags
2454 (not f1 and flag2) or
2455 (not f1 and flag2) or
2455 # change flags
2456 # change flags
2456 (f1 and f2 and flag1 != flag2)):
2457 (f1 and f2 and flag1 != flag2)):
2457 losedatafn(f2 or f1)
2458 losedatafn(f2 or f1)
2458
2459
2459 path1 = f1 or f2
2460 path1 = f1 or f2
2460 path2 = f2 or f1
2461 path2 = f2 or f1
2461 path1 = posixpath.join(prefix, path1[len(relroot):])
2462 path1 = posixpath.join(prefix, path1[len(relroot):])
2462 path2 = posixpath.join(prefix, path2[len(relroot):])
2463 path2 = posixpath.join(prefix, path2[len(relroot):])
2463 header = []
2464 header = []
2464 if opts.git:
2465 if opts.git:
2465 header.append('diff --git %s%s %s%s' %
2466 header.append('diff --git %s%s %s%s' %
2466 (aprefix, path1, bprefix, path2))
2467 (aprefix, path1, bprefix, path2))
2467 if not f1: # added
2468 if not f1: # added
2468 header.append('new file mode %s' % gitmode[flag2])
2469 header.append('new file mode %s' % gitmode[flag2])
2469 elif not f2: # removed
2470 elif not f2: # removed
2470 header.append('deleted file mode %s' % gitmode[flag1])
2471 header.append('deleted file mode %s' % gitmode[flag1])
2471 else: # modified/copied/renamed
2472 else: # modified/copied/renamed
2472 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2473 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2473 if mode1 != mode2:
2474 if mode1 != mode2:
2474 header.append('old mode %s' % mode1)
2475 header.append('old mode %s' % mode1)
2475 header.append('new mode %s' % mode2)
2476 header.append('new mode %s' % mode2)
2476 if copyop is not None:
2477 if copyop is not None:
2477 header.append('%s from %s' % (copyop, path1))
2478 header.append('%s from %s' % (copyop, path1))
2478 header.append('%s to %s' % (copyop, path2))
2479 header.append('%s to %s' % (copyop, path2))
2479 elif revs and not repo.ui.quiet:
2480 elif revs and not repo.ui.quiet:
2480 header.append(diffline(path1, revs))
2481 header.append(diffline(path1, revs))
2481
2482
2482 if binary and opts.git and not opts.nobinary:
2483 if binary and opts.git and not opts.nobinary:
2483 text = mdiff.b85diff(content1, content2)
2484 text = mdiff.b85diff(content1, content2)
2484 if text:
2485 if text:
2485 header.append('index %s..%s' %
2486 header.append('index %s..%s' %
2486 (gitindex(content1), gitindex(content2)))
2487 (gitindex(content1), gitindex(content2)))
2487 else:
2488 else:
2488 text = mdiff.unidiff(content1, date1,
2489 text = mdiff.unidiff(content1, date1,
2489 content2, date2,
2490 content2, date2,
2490 path1, path2, opts=opts)
2491 path1, path2, opts=opts)
2491 if header and (text or len(header) > 1):
2492 if header and (text or len(header) > 1):
2492 yield '\n'.join(header) + '\n'
2493 yield '\n'.join(header) + '\n'
2493 if text:
2494 if text:
2494 yield text
2495 yield text
2495
2496
2496 def diffstatsum(stats):
2497 def diffstatsum(stats):
2497 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2498 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2498 for f, a, r, b in stats:
2499 for f, a, r, b in stats:
2499 maxfile = max(maxfile, encoding.colwidth(f))
2500 maxfile = max(maxfile, encoding.colwidth(f))
2500 maxtotal = max(maxtotal, a + r)
2501 maxtotal = max(maxtotal, a + r)
2501 addtotal += a
2502 addtotal += a
2502 removetotal += r
2503 removetotal += r
2503 binary = binary or b
2504 binary = binary or b
2504
2505
2505 return maxfile, maxtotal, addtotal, removetotal, binary
2506 return maxfile, maxtotal, addtotal, removetotal, binary
2506
2507
2507 def diffstatdata(lines):
2508 def diffstatdata(lines):
2508 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2509 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2509
2510
2510 results = []
2511 results = []
2511 filename, adds, removes, isbinary = None, 0, 0, False
2512 filename, adds, removes, isbinary = None, 0, 0, False
2512
2513
2513 def addresult():
2514 def addresult():
2514 if filename:
2515 if filename:
2515 results.append((filename, adds, removes, isbinary))
2516 results.append((filename, adds, removes, isbinary))
2516
2517
2517 for line in lines:
2518 for line in lines:
2518 if line.startswith('diff'):
2519 if line.startswith('diff'):
2519 addresult()
2520 addresult()
2520 # set numbers to 0 anyway when starting new file
2521 # set numbers to 0 anyway when starting new file
2521 adds, removes, isbinary = 0, 0, False
2522 adds, removes, isbinary = 0, 0, False
2522 if line.startswith('diff --git a/'):
2523 if line.startswith('diff --git a/'):
2523 filename = gitre.search(line).group(2)
2524 filename = gitre.search(line).group(2)
2524 elif line.startswith('diff -r'):
2525 elif line.startswith('diff -r'):
2525 # format: "diff -r ... -r ... filename"
2526 # format: "diff -r ... -r ... filename"
2526 filename = diffre.search(line).group(1)
2527 filename = diffre.search(line).group(1)
2527 elif line.startswith('+') and not line.startswith('+++ '):
2528 elif line.startswith('+') and not line.startswith('+++ '):
2528 adds += 1
2529 adds += 1
2529 elif line.startswith('-') and not line.startswith('--- '):
2530 elif line.startswith('-') and not line.startswith('--- '):
2530 removes += 1
2531 removes += 1
2531 elif (line.startswith('GIT binary patch') or
2532 elif (line.startswith('GIT binary patch') or
2532 line.startswith('Binary file')):
2533 line.startswith('Binary file')):
2533 isbinary = True
2534 isbinary = True
2534 addresult()
2535 addresult()
2535 return results
2536 return results
2536
2537
2537 def diffstat(lines, width=80, git=False):
2538 def diffstat(lines, width=80, git=False):
2538 output = []
2539 output = []
2539 stats = diffstatdata(lines)
2540 stats = diffstatdata(lines)
2540 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2541 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2541
2542
2542 countwidth = len(str(maxtotal))
2543 countwidth = len(str(maxtotal))
2543 if hasbinary and countwidth < 3:
2544 if hasbinary and countwidth < 3:
2544 countwidth = 3
2545 countwidth = 3
2545 graphwidth = width - countwidth - maxname - 6
2546 graphwidth = width - countwidth - maxname - 6
2546 if graphwidth < 10:
2547 if graphwidth < 10:
2547 graphwidth = 10
2548 graphwidth = 10
2548
2549
2549 def scale(i):
2550 def scale(i):
2550 if maxtotal <= graphwidth:
2551 if maxtotal <= graphwidth:
2551 return i
2552 return i
2552 # If diffstat runs out of room it doesn't print anything,
2553 # If diffstat runs out of room it doesn't print anything,
2553 # which isn't very useful, so always print at least one + or -
2554 # which isn't very useful, so always print at least one + or -
2554 # if there were at least some changes.
2555 # if there were at least some changes.
2555 return max(i * graphwidth // maxtotal, int(bool(i)))
2556 return max(i * graphwidth // maxtotal, int(bool(i)))
2556
2557
2557 for filename, adds, removes, isbinary in stats:
2558 for filename, adds, removes, isbinary in stats:
2558 if isbinary:
2559 if isbinary:
2559 count = 'Bin'
2560 count = 'Bin'
2560 else:
2561 else:
2561 count = adds + removes
2562 count = adds + removes
2562 pluses = '+' * scale(adds)
2563 pluses = '+' * scale(adds)
2563 minuses = '-' * scale(removes)
2564 minuses = '-' * scale(removes)
2564 output.append(' %s%s | %*s %s%s\n' %
2565 output.append(' %s%s | %*s %s%s\n' %
2565 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2566 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2566 countwidth, count, pluses, minuses))
2567 countwidth, count, pluses, minuses))
2567
2568
2568 if stats:
2569 if stats:
2569 output.append(_(' %d files changed, %d insertions(+), '
2570 output.append(_(' %d files changed, %d insertions(+), '
2570 '%d deletions(-)\n')
2571 '%d deletions(-)\n')
2571 % (len(stats), totaladds, totalremoves))
2572 % (len(stats), totaladds, totalremoves))
2572
2573
2573 return ''.join(output)
2574 return ''.join(output)
2574
2575
2575 def diffstatui(*args, **kw):
2576 def diffstatui(*args, **kw):
2576 '''like diffstat(), but yields 2-tuples of (output, label) for
2577 '''like diffstat(), but yields 2-tuples of (output, label) for
2577 ui.write()
2578 ui.write()
2578 '''
2579 '''
2579
2580
2580 for line in diffstat(*args, **kw).splitlines():
2581 for line in diffstat(*args, **kw).splitlines():
2581 if line and line[-1] in '+-':
2582 if line and line[-1] in '+-':
2582 name, graph = line.rsplit(' ', 1)
2583 name, graph = line.rsplit(' ', 1)
2583 yield (name + ' ', '')
2584 yield (name + ' ', '')
2584 m = re.search(r'\++', graph)
2585 m = re.search(r'\++', graph)
2585 if m:
2586 if m:
2586 yield (m.group(0), 'diffstat.inserted')
2587 yield (m.group(0), 'diffstat.inserted')
2587 m = re.search(r'-+', graph)
2588 m = re.search(r'-+', graph)
2588 if m:
2589 if m:
2589 yield (m.group(0), 'diffstat.deleted')
2590 yield (m.group(0), 'diffstat.deleted')
2590 else:
2591 else:
2591 yield (line, '')
2592 yield (line, '')
2592 yield ('\n', '')
2593 yield ('\n', '')
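
As a quick orientation for reviewers, here is a minimal usage sketch of the diffstat helpers above. The sample diff text and the expected output are illustrative only and are not taken from this change:

    # Feed diff lines through diffstatdata()/diffstatsum()/diffstat() to get the
    # familiar summary table (illustrative sample input, Python 2 era API).
    from mercurial import patch

    sample = [
        'diff -r 000000000000 -r 111111111111 foo.txt',
        '--- a/foo.txt',
        '+++ b/foo.txt',
        '@@ -1,1 +1,2 @@',
        ' a',
        '+b',
    ]
    stats = patch.diffstatdata(sample)      # [('foo.txt', 1, 0, False)]
    maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
    print(patch.diffstat(sample, width=60))
    # roughly:
    #  foo.txt | 1 +
    #  1 files changed, 1 insertions(+), 0 deletions(-)
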
@@ -1,825 +1,848 b''
1 $ hg init repo
1 $ hg init repo
2 $ cd repo
2 $ cd repo
3
3
4 New file:
4 New file:
5
5
6 $ hg import -d "1000000 0" -mnew - <<EOF
6 $ hg import -d "1000000 0" -mnew - <<EOF
7 > diff --git a/new b/new
7 > diff --git a/new b/new
8 > new file mode 100644
8 > new file mode 100644
9 > index 0000000..7898192
9 > index 0000000..7898192
10 > --- /dev/null
10 > --- /dev/null
11 > +++ b/new
11 > +++ b/new
12 > @@ -0,0 +1 @@
12 > @@ -0,0 +1 @@
13 > +a
13 > +a
14 > EOF
14 > EOF
15 applying patch from stdin
15 applying patch from stdin
16
16
17 $ hg tip -q
17 $ hg tip -q
18 0:ae3ee40d2079
18 0:ae3ee40d2079
19
19
20 New empty file:
20 New empty file:
21
21
22 $ hg import -d "1000000 0" -mempty - <<EOF
22 $ hg import -d "1000000 0" -mempty - <<EOF
23 > diff --git a/empty b/empty
23 > diff --git a/empty b/empty
24 > new file mode 100644
24 > new file mode 100644
25 > EOF
25 > EOF
26 applying patch from stdin
26 applying patch from stdin
27
27
28 $ hg tip -q
28 $ hg tip -q
29 1:ab199dc869b5
29 1:ab199dc869b5
30
30
31 $ hg locate empty
31 $ hg locate empty
32 empty
32 empty
33
33
34 chmod +x:
34 chmod +x:
35
35
36 $ hg import -d "1000000 0" -msetx - <<EOF
36 $ hg import -d "1000000 0" -msetx - <<EOF
37 > diff --git a/new b/new
37 > diff --git a/new b/new
38 > old mode 100644
38 > old mode 100644
39 > new mode 100755
39 > new mode 100755
40 > EOF
40 > EOF
41 applying patch from stdin
41 applying patch from stdin
42
42
43 #if execbit
43 #if execbit
44 $ hg tip -q
44 $ hg tip -q
45 2:3a34410f282e
45 2:3a34410f282e
46 $ test -x new
46 $ test -x new
47 $ hg rollback -q
47 $ hg rollback -q
48 #else
48 #else
49 $ hg tip -q
49 $ hg tip -q
50 1:ab199dc869b5
50 1:ab199dc869b5
51 #endif
51 #endif
52
52
53 Copy and removing x bit:
53 Copy and removing x bit:
54
54
55 $ hg import -f -d "1000000 0" -mcopy - <<EOF
55 $ hg import -f -d "1000000 0" -mcopy - <<EOF
56 > diff --git a/new b/copy
56 > diff --git a/new b/copy
57 > old mode 100755
57 > old mode 100755
58 > new mode 100644
58 > new mode 100644
59 > similarity index 100%
59 > similarity index 100%
60 > copy from new
60 > copy from new
61 > copy to copy
61 > copy to copy
62 > diff --git a/new b/copyx
62 > diff --git a/new b/copyx
63 > similarity index 100%
63 > similarity index 100%
64 > copy from new
64 > copy from new
65 > copy to copyx
65 > copy to copyx
66 > EOF
66 > EOF
67 applying patch from stdin
67 applying patch from stdin
68
68
69 $ test -f copy
69 $ test -f copy
70 #if execbit
70 #if execbit
71 $ test ! -x copy
71 $ test ! -x copy
72 $ test -x copyx
72 $ test -x copyx
73 $ hg tip -q
73 $ hg tip -q
74 2:21dfaae65c71
74 2:21dfaae65c71
75 #else
75 #else
76 $ hg tip -q
76 $ hg tip -q
77 2:0efdaa8e3bf3
77 2:0efdaa8e3bf3
78 #endif
78 #endif
79
79
80 $ hg up -qCr1
80 $ hg up -qCr1
81 $ hg rollback -q
81 $ hg rollback -q
82
82
83 Copy (like above but independent of execbit):
83 Copy (like above but independent of execbit):
84
84
85 $ hg import -d "1000000 0" -mcopy - <<EOF
85 $ hg import -d "1000000 0" -mcopy - <<EOF
86 > diff --git a/new b/copy
86 > diff --git a/new b/copy
87 > similarity index 100%
87 > similarity index 100%
88 > copy from new
88 > copy from new
89 > copy to copy
89 > copy to copy
90 > diff --git a/new b/copyx
90 > diff --git a/new b/copyx
91 > similarity index 100%
91 > similarity index 100%
92 > copy from new
92 > copy from new
93 > copy to copyx
93 > copy to copyx
94 > EOF
94 > EOF
95 applying patch from stdin
95 applying patch from stdin
96
96
97 $ hg tip -q
97 $ hg tip -q
98 2:0efdaa8e3bf3
98 2:0efdaa8e3bf3
99 $ test -f copy
99 $ test -f copy
100
100
101 $ cat copy
101 $ cat copy
102 a
102 a
103
103
104 $ hg cat copy
104 $ hg cat copy
105 a
105 a
106
106
107 Rename:
107 Rename:
108
108
109 $ hg import -d "1000000 0" -mrename - <<EOF
109 $ hg import -d "1000000 0" -mrename - <<EOF
110 > diff --git a/copy b/rename
110 > diff --git a/copy b/rename
111 > similarity index 100%
111 > similarity index 100%
112 > rename from copy
112 > rename from copy
113 > rename to rename
113 > rename to rename
114 > EOF
114 > EOF
115 applying patch from stdin
115 applying patch from stdin
116
116
117 $ hg tip -q
117 $ hg tip -q
118 3:b1f57753fad2
118 3:b1f57753fad2
119
119
120 $ hg locate
120 $ hg locate
121 copyx
121 copyx
122 empty
122 empty
123 new
123 new
124 rename
124 rename
125
125
126 Delete:
126 Delete:
127
127
128 $ hg import -d "1000000 0" -mdelete - <<EOF
128 $ hg import -d "1000000 0" -mdelete - <<EOF
129 > diff --git a/copyx b/copyx
129 > diff --git a/copyx b/copyx
130 > deleted file mode 100755
130 > deleted file mode 100755
131 > index 7898192..0000000
131 > index 7898192..0000000
132 > --- a/copyx
132 > --- a/copyx
133 > +++ /dev/null
133 > +++ /dev/null
134 > @@ -1 +0,0 @@
134 > @@ -1 +0,0 @@
135 > -a
135 > -a
136 > EOF
136 > EOF
137 applying patch from stdin
137 applying patch from stdin
138
138
139 $ hg tip -q
139 $ hg tip -q
140 4:1bd1da94b9b2
140 4:1bd1da94b9b2
141
141
142 $ hg locate
142 $ hg locate
143 empty
143 empty
144 new
144 new
145 rename
145 rename
146
146
147 $ test -f copyx
147 $ test -f copyx
148 [1]
148 [1]
149
149
150 Regular diff:
150 Regular diff:
151
151
152 $ hg import -d "1000000 0" -mregular - <<EOF
152 $ hg import -d "1000000 0" -mregular - <<EOF
153 > diff --git a/rename b/rename
153 > diff --git a/rename b/rename
154 > index 7898192..72e1fe3 100644
154 > index 7898192..72e1fe3 100644
155 > --- a/rename
155 > --- a/rename
156 > +++ b/rename
156 > +++ b/rename
157 > @@ -1 +1,5 @@
157 > @@ -1 +1,5 @@
158 > a
158 > a
159 > +a
159 > +a
160 > +a
160 > +a
161 > +a
161 > +a
162 > +a
162 > +a
163 > EOF
163 > EOF
164 applying patch from stdin
164 applying patch from stdin
165
165
166 $ hg tip -q
166 $ hg tip -q
167 5:46fe99cb3035
167 5:46fe99cb3035
168
168
169 Copy and modify:
169 Copy and modify:
170
170
171 $ hg import -d "1000000 0" -mcopymod - <<EOF
171 $ hg import -d "1000000 0" -mcopymod - <<EOF
172 > diff --git a/rename b/copy2
172 > diff --git a/rename b/copy2
173 > similarity index 80%
173 > similarity index 80%
174 > copy from rename
174 > copy from rename
175 > copy to copy2
175 > copy to copy2
176 > index 72e1fe3..b53c148 100644
176 > index 72e1fe3..b53c148 100644
177 > --- a/rename
177 > --- a/rename
178 > +++ b/copy2
178 > +++ b/copy2
179 > @@ -1,5 +1,5 @@
179 > @@ -1,5 +1,5 @@
180 > a
180 > a
181 > a
181 > a
182 > -a
182 > -a
183 > +b
183 > +b
184 > a
184 > a
185 > a
185 > a
186 > EOF
186 > EOF
187 applying patch from stdin
187 applying patch from stdin
188
188
189 $ hg tip -q
189 $ hg tip -q
190 6:ffeb3197c12d
190 6:ffeb3197c12d
191
191
192 $ hg cat copy2
192 $ hg cat copy2
193 a
193 a
194 a
194 a
195 b
195 b
196 a
196 a
197 a
197 a
198
198
199 Rename and modify:
199 Rename and modify:
200
200
201 $ hg import -d "1000000 0" -mrenamemod - <<EOF
201 $ hg import -d "1000000 0" -mrenamemod - <<EOF
202 > diff --git a/copy2 b/rename2
202 > diff --git a/copy2 b/rename2
203 > similarity index 80%
203 > similarity index 80%
204 > rename from copy2
204 > rename from copy2
205 > rename to rename2
205 > rename to rename2
206 > index b53c148..8f81e29 100644
206 > index b53c148..8f81e29 100644
207 > --- a/copy2
207 > --- a/copy2
208 > +++ b/rename2
208 > +++ b/rename2
209 > @@ -1,5 +1,5 @@
209 > @@ -1,5 +1,5 @@
210 > a
210 > a
211 > a
211 > a
212 > b
212 > b
213 > -a
213 > -a
214 > +c
214 > +c
215 > a
215 > a
216 > EOF
216 > EOF
217 applying patch from stdin
217 applying patch from stdin
218
218
219 $ hg tip -q
219 $ hg tip -q
220 7:401aede9e6bb
220 7:401aede9e6bb
221
221
222 $ hg locate copy2
222 $ hg locate copy2
223 [1]
223 [1]
224 $ hg cat rename2
224 $ hg cat rename2
225 a
225 a
226 a
226 a
227 b
227 b
228 c
228 c
229 a
229 a
230
230
231 One file renamed multiple times:
231 One file renamed multiple times:
232
232
233 $ hg import -d "1000000 0" -mmultirenames - <<EOF
233 $ hg import -d "1000000 0" -mmultirenames - <<EOF
234 > diff --git a/rename2 b/rename3
234 > diff --git a/rename2 b/rename3
235 > rename from rename2
235 > rename from rename2
236 > rename to rename3
236 > rename to rename3
237 > diff --git a/rename2 b/rename3-2
237 > diff --git a/rename2 b/rename3-2
238 > rename from rename2
238 > rename from rename2
239 > rename to rename3-2
239 > rename to rename3-2
240 > EOF
240 > EOF
241 applying patch from stdin
241 applying patch from stdin
242
242
243 $ hg tip -q
243 $ hg tip -q
244 8:2ef727e684e8
244 8:2ef727e684e8
245
245
246 $ hg log -vr. --template '{rev} {files} / {file_copies}\n'
246 $ hg log -vr. --template '{rev} {files} / {file_copies}\n'
247 8 rename2 rename3 rename3-2 / rename3 (rename2)rename3-2 (rename2)
247 8 rename2 rename3 rename3-2 / rename3 (rename2)rename3-2 (rename2)
248
248
249 $ hg locate rename2 rename3 rename3-2
249 $ hg locate rename2 rename3 rename3-2
250 rename3
250 rename3
251 rename3-2
251 rename3-2
252
252
253 $ hg cat rename3
253 $ hg cat rename3
254 a
254 a
255 a
255 a
256 b
256 b
257 c
257 c
258 a
258 a
259
259
260 $ hg cat rename3-2
260 $ hg cat rename3-2
261 a
261 a
262 a
262 a
263 b
263 b
264 c
264 c
265 a
265 a
266
266
267 $ echo foo > foo
267 $ echo foo > foo
268 $ hg add foo
268 $ hg add foo
269 $ hg ci -m 'add foo'
269 $ hg ci -m 'add foo'
270
270
271 Binary files and regular patch hunks:
271 Binary files and regular patch hunks:
272
272
273 $ hg import -d "1000000 0" -m binaryregular - <<EOF
273 $ hg import -d "1000000 0" -m binaryregular - <<EOF
274 > diff --git a/binary b/binary
274 > diff --git a/binary b/binary
275 > new file mode 100644
275 > new file mode 100644
276 > index 0000000000000000000000000000000000000000..593f4708db84ac8fd0f5cc47c634f38c013fe9e4
276 > index 0000000000000000000000000000000000000000..593f4708db84ac8fd0f5cc47c634f38c013fe9e4
277 > GIT binary patch
277 > GIT binary patch
278 > literal 4
278 > literal 4
279 > Lc\${NkU|;|M00aO5
279 > Lc\${NkU|;|M00aO5
280 >
280 >
281 > diff --git a/foo b/foo2
281 > diff --git a/foo b/foo2
282 > rename from foo
282 > rename from foo
283 > rename to foo2
283 > rename to foo2
284 > EOF
284 > EOF
285 applying patch from stdin
285 applying patch from stdin
286
286
287 $ hg tip -q
287 $ hg tip -q
288 10:27377172366e
288 10:27377172366e
289
289
290 $ cat foo2
290 $ cat foo2
291 foo
291 foo
292
292
293 $ hg manifest --debug | grep binary
293 $ hg manifest --debug | grep binary
294 045c85ba38952325e126c70962cc0f9d9077bc67 644 binary
294 045c85ba38952325e126c70962cc0f9d9077bc67 644 binary
295
295
296 Multiple binary files:
296 Multiple binary files:
297
297
298 $ hg import -d "1000000 0" -m multibinary - <<EOF
298 $ hg import -d "1000000 0" -m multibinary - <<EOF
299 > diff --git a/mbinary1 b/mbinary1
299 > diff --git a/mbinary1 b/mbinary1
300 > new file mode 100644
300 > new file mode 100644
301 > index 0000000000000000000000000000000000000000..593f4708db84ac8fd0f5cc47c634f38c013fe9e4
301 > index 0000000000000000000000000000000000000000..593f4708db84ac8fd0f5cc47c634f38c013fe9e4
302 > GIT binary patch
302 > GIT binary patch
303 > literal 4
303 > literal 4
304 > Lc\${NkU|;|M00aO5
304 > Lc\${NkU|;|M00aO5
305 >
305 >
306 > diff --git a/mbinary2 b/mbinary2
306 > diff --git a/mbinary2 b/mbinary2
307 > new file mode 100644
307 > new file mode 100644
308 > index 0000000000000000000000000000000000000000..112363ac1917b417ffbd7f376ca786a1e5fa7490
308 > index 0000000000000000000000000000000000000000..112363ac1917b417ffbd7f376ca786a1e5fa7490
309 > GIT binary patch
309 > GIT binary patch
310 > literal 5
310 > literal 5
311 > Mc\${NkU|\`?^000jF3jhEB
311 > Mc\${NkU|\`?^000jF3jhEB
312 >
312 >
313 > EOF
313 > EOF
314 applying patch from stdin
314 applying patch from stdin
315
315
316 $ hg tip -q
316 $ hg tip -q
317 11:18b73a84b4ab
317 11:18b73a84b4ab
318
318
319 $ hg manifest --debug | grep mbinary
319 $ hg manifest --debug | grep mbinary
320 045c85ba38952325e126c70962cc0f9d9077bc67 644 mbinary1
320 045c85ba38952325e126c70962cc0f9d9077bc67 644 mbinary1
321 a874b471193996e7cb034bb301cac7bdaf3e3f46 644 mbinary2
321 a874b471193996e7cb034bb301cac7bdaf3e3f46 644 mbinary2
322
322
323 Binary file and delta hunk (we build the patch using this sed hack to
323 Binary file and delta hunk (we build the patch using this sed hack to
324 avoid an unquoted ^, which check-code says breaks sh on Solaris):
324 avoid an unquoted ^, which check-code says breaks sh on Solaris):
325
325
326 $ sed 's/ caret /^/g;s/ dollarparen /$(/g' > quote-hack.patch <<'EOF'
326 $ sed 's/ caret /^/g;s/ dollarparen /$(/g' > quote-hack.patch <<'EOF'
327 > diff --git a/delta b/delta
327 > diff --git a/delta b/delta
328 > new file mode 100644
328 > new file mode 100644
329 > index 0000000000000000000000000000000000000000..8c9b7831b231c2600843e303e66b521353a200b3
329 > index 0000000000000000000000000000000000000000..8c9b7831b231c2600843e303e66b521353a200b3
330 > GIT binary patch
330 > GIT binary patch
331 > literal 3749
331 > literal 3749
332 > zcmV;W4qEYvP)<h;3K|Lk000e1NJLTq006iE002D*0ssI2kt{U(0000PbVXQnQ*UN;
332 > zcmV;W4qEYvP)<h;3K|Lk000e1NJLTq006iE002D*0ssI2kt{U(0000PbVXQnQ*UN;
333 > zcVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBU=M@d9MRCwC#oC!>o#}>x{(W-y~UN*tK
333 > zcVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBU=M@d9MRCwC#oC!>o#}>x{(W-y~UN*tK
334 > z%A%sxiUy2Ys)0Vm#ueArYKoYqX;GuiqZpgirM6nCVoYk?YNAz3G~z;BZ~@~&OQEe4
334 > z%A%sxiUy2Ys)0Vm#ueArYKoYqX;GuiqZpgirM6nCVoYk?YNAz3G~z;BZ~@~&OQEe4
335 > zmGvS5isFJI;Pd_7J+EKxyHZeu` caret t4r2>F;h-+VK3{_{WoGv8dSpFDYDrA%3UX03pt
335 > zmGvS5isFJI;Pd_7J+EKxyHZeu` caret t4r2>F;h-+VK3{_{WoGv8dSpFDYDrA%3UX03pt
336 > zOaVoi0*W#P6lDr1$`nwPDWE7*rhuYM0Y#YtiZTThWeO<D6i}2YpqR<%$s>bRRaI42
336 > zOaVoi0*W#P6lDr1$`nwPDWE7*rhuYM0Y#YtiZTThWeO<D6i}2YpqR<%$s>bRRaI42
337 > zS3iFIxJ8Q=EnBv1Z7?pBw_bLjJb3V+tgP(Tty_2R-mR#p04x78n2n7MSOFyt4i1iv
337 > zS3iFIxJ8Q=EnBv1Z7?pBw_bLjJb3V+tgP(Tty_2R-mR#p04x78n2n7MSOFyt4i1iv
338 > zjxH`PPEJmgD7U?IK&h;(EGQ@_DJc<@01=4fiNXHcKZ8LhZQ8T}E3U4tUS3}OrcgQW
338 > zjxH`PPEJmgD7U?IK&h;(EGQ@_DJc<@01=4fiNXHcKZ8LhZQ8T}E3U4tUS3}OrcgQW
339 > zWdX{K8#l7Ev&#$ysR)G#0*rC+<WGZ3?CtG4bm-ve>Dj$|_qJ`@D*stNP_AFUe&x!Q
339 > zWdX{K8#l7Ev&#$ysR)G#0*rC+<WGZ3?CtG4bm-ve>Dj$|_qJ`@D*stNP_AFUe&x!Q
340 > zJ9q9B7Z=ym)MyZ?Tg1ROunUYr81nV?B@!tYS~5_|%gfW#(_s<4UN1!Q?Dv8d>g#m6
340 > zJ9q9B7Z=ym)MyZ?Tg1ROunUYr81nV?B@!tYS~5_|%gfW#(_s<4UN1!Q?Dv8d>g#m6
341 > z%*@R2@bI2JdnzxQ!EDU`$eQY!tgI~Zn$prz;gaXNod5*5p(1Bz=P$qfvZ$y?dC@X~
341 > z%*@R2@bI2JdnzxQ!EDU`$eQY!tgI~Zn$prz;gaXNod5*5p(1Bz=P$qfvZ$y?dC@X~
342 > zlAD+NAKhB{=;6bMwzjqn>9mavvKOGd`s%A+fBiL>Q;xJWpa72C+}u{JTHUX>{~}Qj
342 > zlAD+NAKhB{=;6bMwzjqn>9mavvKOGd`s%A+fBiL>Q;xJWpa72C+}u{JTHUX>{~}Qj
343 > zUb%hyHgN~c?cBLjInvUALMD9g-aXt54ZL8AOCvXL-V6!~ijR*kEG$&Mv?!pE61OlI
343 > zUb%hyHgN~c?cBLjInvUALMD9g-aXt54ZL8AOCvXL-V6!~ijR*kEG$&Mv?!pE61OlI
344 > z8nzMSPE8F7bH|Py*RNl1VUCggq<V)>@_6gkEeiz7{rmTeuNTW6+KVS#0FG%IHf-3L
344 > z8nzMSPE8F7bH|Py*RNl1VUCggq<V)>@_6gkEeiz7{rmTeuNTW6+KVS#0FG%IHf-3L
345 > zGiS21vn>WCCr+GLx caret !uNetzB6u3o(w6&1C2?_LW8ij$+$sZ*zZ`|US3H@8N~%&V%Z
345 > zGiS21vn>WCCr+GLx caret !uNetzB6u3o(w6&1C2?_LW8ij$+$sZ*zZ`|US3H@8N~%&V%Z
346 > zAeA0HdhFS=$6|nzn3%YH`SN<>DQRO;Qc caret )dfdvA caret 5u`Xf;Zzu<ZQHgG?28V-#s<;T
346 > zAeA0HdhFS=$6|nzn3%YH`SN<>DQRO;Qc caret )dfdvA caret 5u`Xf;Zzu<ZQHgG?28V-#s<;T
347 > zzkh#LA)v7gpoE5ou3o*GoUUF%b#iht&kl9d0)><$FE1}ACr68;uCA`6DrGmz_U+rp
347 > zzkh#LA)v7gpoE5ou3o*GoUUF%b#iht&kl9d0)><$FE1}ACr68;uCA`6DrGmz_U+rp
348 > zL>Rx;X_yhk$fP_yJrTCQ|NgsW0A<985g&c@k-NKly<>mgU8n||ZPPV<`SN8#%$+-T
348 > zL>Rx;X_yhk$fP_yJrTCQ|NgsW0A<985g&c@k-NKly<>mgU8n||ZPPV<`SN8#%$+-T
349 > zfP$T!ou8jypFVwnzqhxyUvIxXd-wF~*U!ht=hCH1wzjqn9x#)IrhDa;S0JbK caret z_$W
349 > zfP$T!ou8jypFVwnzqhxyUvIxXd-wF~*U!ht=hCH1wzjqn9x#)IrhDa;S0JbK caret z_$W
350 > zd(8rX@;7|t*;GJ5h$SZ{v(}+UBEs$4w~?{@9%`_Z<P<kox5bMWuUWH(sF9hONgd$Q
350 > zd(8rX@;7|t*;GJ5h$SZ{v(}+UBEs$4w~?{@9%`_Z<P<kox5bMWuUWH(sF9hONgd$Q
351 > zunCgwT@1|CU9+;X caret 4z&|M~@yw23Ay50NFWn=FqF%yLZEUty;AT2??1oV@B)Nt))J7
351 > zunCgwT@1|CU9+;X caret 4z&|M~@yw23Ay50NFWn=FqF%yLZEUty;AT2??1oV@B)Nt))J7
352 > zh>{5j2@f7T=-an%L_`E)h;mZ4D_5>?7tjQtVPRo2XU-&;mX(!l-MSTJP4XWY82JAC
352 > zh>{5j2@f7T=-an%L_`E)h;mZ4D_5>?7tjQtVPRo2XU-&;mX(!l-MSTJP4XWY82JAC
353 > z@57+y&!1=P{Mn{W8)-HzEsgAtd63}Cazc>O6vGb>51%@9DzbyI3?4j~$ijmT95_IS
353 > z@57+y&!1=P{Mn{W8)-HzEsgAtd63}Cazc>O6vGb>51%@9DzbyI3?4j~$ijmT95_IS
354 > zS#r!LCDW%*4-O7CGnkr$xXR1RQ&UrA<CQt} caret 73NL%zk`)Jk!yxUAt-1r}ggLn-Zq}
354 > zS#r!LCDW%*4-O7CGnkr$xXR1RQ&UrA<CQt} caret 73NL%zk`)Jk!yxUAt-1r}ggLn-Zq}
355 > z*s){8pw68;i+kiG%CpBKYSJLLFyq&*U8}qDp+kpe&6<Vp(Z58%l#~>ZK?&s7y?b}i
355 > z*s){8pw68;i+kiG%CpBKYSJLLFyq&*U8}qDp+kpe&6<Vp(Z58%l#~>ZK?&s7y?b}i
356 > zuwcOgO%x-27A;y785zknl_{sU;E6v$8{pWmVS{KaJPpu`i;HP$#flY@u~Ua~K3%tN
356 > zuwcOgO%x-27A;y785zknl_{sU;E6v$8{pWmVS{KaJPpu`i;HP$#flY@u~Ua~K3%tN
357 > z-LhrNh{9SoHgDd%WXTc$$~Dq{?AWou3!H&?V8K{ caret {P9Ot5vecD?%1&-E-ntBFj87(
357 > z-LhrNh{9SoHgDd%WXTc$$~Dq{?AWou3!H&?V8K{ caret {P9Ot5vecD?%1&-E-ntBFj87(
358 > zy5`QE%QRX7qcHC%1{Ua}M~}L6=`wQUNEQ=I;qc+ZMMXtK2T+0os;jEco;}OV9z1w3
358 > zy5`QE%QRX7qcHC%1{Ua}M~}L6=`wQUNEQ=I;qc+ZMMXtK2T+0os;jEco;}OV9z1w3
359 > zARqv caret bm-85xnRCng3OT|MyVSmR3ND7 caret ?KaQGG! caret (aTbo1N;Nz;X3Q9FJbwK6`0?Yp
359 > zARqv caret bm-85xnRCng3OT|MyVSmR3ND7 caret ?KaQGG! caret (aTbo1N;Nz;X3Q9FJbwK6`0?Yp
360 > zj*X2ac;Pw3!I2|JShDaF>-gJmzm1NLj){rk&o|$E caret WAsfrK=x&@B!`w7Hik81sPz4
360 > zj*X2ac;Pw3!I2|JShDaF>-gJmzm1NLj){rk&o|$E caret WAsfrK=x&@B!`w7Hik81sPz4
361 > zuJTaiCppM>-+c!wPzcUw)5@?J4U-u|pJ~xbWUe-C+60k caret 7>9!)56DbjmA~`OJJ40v
361 > zuJTaiCppM>-+c!wPzcUw)5@?J4U-u|pJ~xbWUe-C+60k caret 7>9!)56DbjmA~`OJJ40v
362 > zu3hCA7eJXZWeN|1iJLu87$;+fS8+Kq6O`aT)*_x@sY#t7LxwoEcVw*)cWhhQW@l%!
362 > zu3hCA7eJXZWeN|1iJLu87$;+fS8+Kq6O`aT)*_x@sY#t7LxwoEcVw*)cWhhQW@l%!
363 > z{#Z=y+qcK@%z{p*D=8_Fcg278AnH3fI5;~yGu?9TscxXaaP*4$f<LIv! caret 5Lfr%vKg
363 > z{#Z=y+qcK@%z{p*D=8_Fcg278AnH3fI5;~yGu?9TscxXaaP*4$f<LIv! caret 5Lfr%vKg
364 > zpxmunH#%=+ICMvZA~wyNH%~eMl!-g caret R!cYJ#WmLq5N8viz#J%%LPtkO?V)tZ81cp>
364 > zpxmunH#%=+ICMvZA~wyNH%~eMl!-g caret R!cYJ#WmLq5N8viz#J%%LPtkO?V)tZ81cp>
365 > z{ALK?fNPePmd;289&M8Q3>YwgZX5GcGY&n>K1<x)!`;Qjg&}bb!Lrnl@xH#kS~VYE
365 > z{ALK?fNPePmd;289&M8Q3>YwgZX5GcGY&n>K1<x)!`;Qjg&}bb!Lrnl@xH#kS~VYE
366 > zpJmIJO`A3iy+Y3X`k>cY-@}Iw2Onq`=!ba3eATgs3yg3Wej=+P-Z8WF#w=RXvS@J3
366 > zpJmIJO`A3iy+Y3X`k>cY-@}Iw2Onq`=!ba3eATgs3yg3Wej=+P-Z8WF#w=RXvS@J3
367 > zEyhVTj-gO?kfDu1g9afo<RkPrYzG#_yF41IFxF%Ylg>9lx6<clPweR-b7Hn+r)e1l
367 > zEyhVTj-gO?kfDu1g9afo<RkPrYzG#_yF41IFxF%Ylg>9lx6<clPweR-b7Hn+r)e1l
368 > zO6c6FbNt@;;*w$z;N|H>h{czme)_4V6UC4hv**kX2@L caret Bgds dollarparen &P7M4dhfmWe)!=B
368 > zO6c6FbNt@;;*w$z;N|H>h{czme)_4V6UC4hv**kX2@L caret Bgds dollarparen &P7M4dhfmWe)!=B
369 > zR3X=Y{P9N}p@-##@1ZNW1YbVaiP~D@8m&<dzEP&cO|87Ju#j*=;wH~Exr>i*Hpp&@
369 > zR3X=Y{P9N}p@-##@1ZNW1YbVaiP~D@8m&<dzEP&cO|87Ju#j*=;wH~Exr>i*Hpp&@
370 > z`9!Sj+O;byD~s8qZ>6QB8uv7Bpn&&?xe;;e<M4F8KEID&pT7QmqoSgq&06adp5T=U
370 > z`9!Sj+O;byD~s8qZ>6QB8uv7Bpn&&?xe;;e<M4F8KEID&pT7QmqoSgq&06adp5T=U
371 > z6DH*4=AB7C1D9Amu?ia-wtxSAlmTEO96XHx)-+rKP;ip$pukuSJGW3P1aUmc2yo%)
371 > z6DH*4=AB7C1D9Amu?ia-wtxSAlmTEO96XHx)-+rKP;ip$pukuSJGW3P1aUmc2yo%)
372 > z&<t3F>d1X+1qzaag-%x+eKHx{?Afz3GBQSw9u0lw<mB+I#v11TKRpKWQS+lvVL7=u
372 > z&<t3F>d1X+1qzaag-%x+eKHx{?Afz3GBQSw9u0lw<mB+I#v11TKRpKWQS+lvVL7=u
373 > zHr6)1ynEF<i3kO6A8&ppPMo-F=PnWfXkSj@i*7J6C<F}wR?s(O0niC?t+6;+k}pPq
373 > zHr6)1ynEF<i3kO6A8&ppPMo-F=PnWfXkSj@i*7J6C<F}wR?s(O0niC?t+6;+k}pPq
374 > zrok&TPU40rL0ZYDwenNrrmPZ`gjo@DEF`7 caret cKP||pUr;+r)hyn9O37=xA`3%Bj-ih
374 > zrok&TPU40rL0ZYDwenNrrmPZ`gjo@DEF`7 caret cKP||pUr;+r)hyn9O37=xA`3%Bj-ih
375 > z+1usk<%5G-y+R?tA`qY=)6&vNjL{P?QzHg%P%>`ZxP=QB%DHY6L26?36V_p caret {}n$q
375 > z+1usk<%5G-y+R?tA`qY=)6&vNjL{P?QzHg%P%>`ZxP=QB%DHY6L26?36V_p caret {}n$q
376 > z3@9W=KmGI*Ng_Q#AzA%-z|Z caret |#oW(hkfgpuS$RKRhlrarX%efMMCs}GLChec5+y{6
376 > z3@9W=KmGI*Ng_Q#AzA%-z|Z caret |#oW(hkfgpuS$RKRhlrarX%efMMCs}GLChec5+y{6
377 > z1Qnxim_C-fmQuaAK_NUHUBV&;1c0V)wji<RcdZ*aAWTwyt>hVnlt caret asFCe0&a@tqp
377 > z1Qnxim_C-fmQuaAK_NUHUBV&;1c0V)wji<RcdZ*aAWTwyt>hVnlt caret asFCe0&a@tqp
378 > zEEy;$L}D$X6)wfQNl8gu6Z>oB3_RrP=gTyK2@@w#LbQfLNHj>Q&z(C5wUFhK+}0aV
378 > zEEy;$L}D$X6)wfQNl8gu6Z>oB3_RrP=gTyK2@@w#LbQfLNHj>Q&z(C5wUFhK+}0aV
379 > zSohlc=7K+spN<ctf}5KgKqNyJDNP9;LZd)nTE=9|6Xdr9%Hzk63-tL2c9FD*rsyYY
379 > zSohlc=7K+spN<ctf}5KgKqNyJDNP9;LZd)nTE=9|6Xdr9%Hzk63-tL2c9FD*rsyYY
380 > z!}t+Yljq7-p$X;4_YL?6d;mdY3R##o1e%rlPxrsMh8|;sKTr~ caret QD#sw3&vS$FwlTk
380 > z!}t+Yljq7-p$X;4_YL?6d;mdY3R##o1e%rlPxrsMh8|;sKTr~ caret QD#sw3&vS$FwlTk
381 > zp1#Gw!Qo-$LtvpXt#ApV0g) caret F=qFB`VB!W297x=$mr<$>rco3v$QKih_xN!k6;M=@
381 > zp1#Gw!Qo-$LtvpXt#ApV0g) caret F=qFB`VB!W297x=$mr<$>rco3v$QKih_xN!k6;M=@
382 > zCr?gDNQj7tm@;JwD;Ty&NlBSCYZk(b3dZeN8D4h2{r20dSFc7;(>E&r`s=TVtzpB4
382 > zCr?gDNQj7tm@;JwD;Ty&NlBSCYZk(b3dZeN8D4h2{r20dSFc7;(>E&r`s=TVtzpB4
383 > zk+ caret N&zCAiRns(?p6iBlk9v&h{1ve(FNtc)td51M>)TkXhc6{>5C)`fS$&)A1*CP1%
383 > zk+ caret N&zCAiRns(?p6iBlk9v&h{1ve(FNtc)td51M>)TkXhc6{>5C)`fS$&)A1*CP1%
384 > zld+peue4aYbg3C0!+4mu+}vE caret j_feX+ZijvffBI7Ofh#RZ*U3<3J5(+nfRCzexqQ5
384 > zld+peue4aYbg3C0!+4mu+}vE caret j_feX+ZijvffBI7Ofh#RZ*U3<3J5(+nfRCzexqQ5
385 > zgM&##Y4Dd{e%ZKjqrbm@|Ni}l4jo!AqtFynj3Xsd$o caret ?yV4$|UQ(j&UWCH>M=o_&N
385 > zgM&##Y4Dd{e%ZKjqrbm@|Ni}l4jo!AqtFynj3Xsd$o caret ?yV4$|UQ(j&UWCH>M=o_&N
386 > zmclXc3i|Q#<;#EoG>~V}4unTHbUK}u=y4;rA3S&vzC3 caret aJP!&D4RvvGfoyo(>C>la
386 > zmclXc3i|Q#<;#EoG>~V}4unTHbUK}u=y4;rA3S&vzC3 caret aJP!&D4RvvGfoyo(>C>la
387 > zijP<=v>X{3Ne&2BXo}DV8l0V-jdv`$am0ubG{Wuh%CTd|l9Q7m;G&|U@#Dvbhlj(d
387 > zijP<=v>X{3Ne&2BXo}DV8l0V-jdv`$am0ubG{Wuh%CTd|l9Q7m;G&|U@#Dvbhlj(d
388 > zg6W{3ATxYt#T?)3;SmIgOP4M|Dki~I_TX7SxP0x}wI~DQI7Lhm2BI7gph(aPIFAd;
388 > zg6W{3ATxYt#T?)3;SmIgOP4M|Dki~I_TX7SxP0x}wI~DQI7Lhm2BI7gph(aPIFAd;
389 > zQ&UsF`Q{rOz+z=87c5v%@5u~d6dWV5OlX`oH3cAH&UlvsZUEo(Q(P|lKs17rXvaiU
389 > zQ&UsF`Q{rOz+z=87c5v%@5u~d6dWV5OlX`oH3cAH&UlvsZUEo(Q(P|lKs17rXvaiU
390 > zQcj}IEufi1+Bnh6&(EhF{7O3vLHp`jjlp0J<M1kh$+$2xGm~Zk7OY7(q=&Rdhq*RG
390 > zQcj}IEufi1+Bnh6&(EhF{7O3vLHp`jjlp0J<M1kh$+$2xGm~Zk7OY7(q=&Rdhq*RG
391 > zwrmcd5MnP}xByB_)P@{J>DR9x6;`cUwPM8z){yooNiXPOc9_{W-gtwxE5TUg0vJk6
391 > zwrmcd5MnP}xByB_)P@{J>DR9x6;`cUwPM8z){yooNiXPOc9_{W-gtwxE5TUg0vJk6
392 > zO#JGruV&1cL6VGK2?+_YQr4`+EY8;Sm$9U$uuGRN=uj3k7?O9b+R~J7t_y*K64ZnI
392 > zO#JGruV&1cL6VGK2?+_YQr4`+EY8;Sm$9U$uuGRN=uj3k7?O9b+R~J7t_y*K64ZnI
393 > zM+{aE<b(v?vSmw;9zFP!aE266zHIhlmdI@ caret xa6o2jwdRk54a$>pcRbC29ZyG!Cfdp
393 > zM+{aE<b(v?vSmw;9zFP!aE266zHIhlmdI@ caret xa6o2jwdRk54a$>pcRbC29ZyG!Cfdp
394 > zutFf`Q`vljgo!(wHf=)F#m2_MIuj;L(2ja2YsQRX+rswV{d<H`Ar;(@%aNa9VPU8Z
394 > zutFf`Q`vljgo!(wHf=)F#m2_MIuj;L(2ja2YsQRX+rswV{d<H`Ar;(@%aNa9VPU8Z
395 > z;tq*`y}dm#NDJHKlV}uTIm!_vAq5E7!X-p{P=Z=Sh668>PuVS1*6e}OwOiMc;u3OQ
395 > z;tq*`y}dm#NDJHKlV}uTIm!_vAq5E7!X-p{P=Z=Sh668>PuVS1*6e}OwOiMc;u3OQ
396 > z@Bs)w3=lzfKoufH$SFuPG@uZ4NOnM#+=8LnQ2Q4zUd+nM+OT26;lqbN{P07dhH{jH
396 > z@Bs)w3=lzfKoufH$SFuPG@uZ4NOnM#+=8LnQ2Q4zUd+nM+OT26;lqbN{P07dhH{jH
397 > zManE8 caret dLms-Q2;1kB<*Q1a3f8kZr;xX=!Qro@`~@xN*Qj>gx;i;0Z24!~i2uLb`}v
397 > zManE8 caret dLms-Q2;1kB<*Q1a3f8kZr;xX=!Qro@`~@xN*Qj>gx;i;0Z24!~i2uLb`}v
398 > zA?R$|wvC+m caret Ups=*(4lDh*=UN8{5h(A?p#D caret 2N$8u4Z55!q?ZAh(iEEng9_Zi>IgO
398 > zA?R$|wvC+m caret Ups=*(4lDh*=UN8{5h(A?p#D caret 2N$8u4Z55!q?ZAh(iEEng9_Zi>IgO
399 > z#~**JC8hE4@n{hO&8btT5F*?nC_%LhA3i)PDhh-pB_&1wGrDIl caret *=8x3n&;akBf caret -
399 > z#~**JC8hE4@n{hO&8btT5F*?nC_%LhA3i)PDhh-pB_&1wGrDIl caret *=8x3n&;akBf caret -
400 > zJd&86kq$%%907v caret tgWoQdwI`|oNK%VvU~S#C<o caret F?6c48?Cjj#-4P<>HFD%&|Ni~t
400 > zJd&86kq$%%907v caret tgWoQdwI`|oNK%VvU~S#C<o caret F?6c48?Cjj#-4P<>HFD%&|Ni~t
401 > zKJ(|#H`$<5W+6ZkBb213rXonKZLB+X> caret L}J@W6osP3piLD_5?R!`S}*{xLBzFiL4@
401 > zKJ(|#H`$<5W+6ZkBb213rXonKZLB+X> caret L}J@W6osP3piLD_5?R!`S}*{xLBzFiL4@
402 > zX+}l{`A%?f@T5tT%ztu60p;)be`fWC`tP@WpO=?cpf8Xuf1OSj6d3f@Ki(ovDYq%0
402 > zX+}l{`A%?f@T5tT%ztu60p;)be`fWC`tP@WpO=?cpf8Xuf1OSj6d3f@Ki(ovDYq%0
403 > z{4ZSe`kOay5@=lAT!}vFzxyemC{sXDrhuYM0Y#ZI1r%ipD9W11{w=@&xgJ}t2x;ep
403 > z{4ZSe`kOay5@=lAT!}vFzxyemC{sXDrhuYM0Y#ZI1r%ipD9W11{w=@&xgJ}t2x;ep
404 > P00000NkvXXu0mjfZ5|Er
404 > P00000NkvXXu0mjfZ5|Er
405 >
405 >
406 > literal 0
406 > literal 0
407 > HcmV?d00001
407 > HcmV?d00001
408 >
408 >
409 > EOF
409 > EOF
410 $ hg import -d "1000000 0" -m delta quote-hack.patch
410 $ hg import -d "1000000 0" -m delta quote-hack.patch
411 applying quote-hack.patch
411 applying quote-hack.patch
412 $ rm quote-hack.patch
412 $ rm quote-hack.patch
413
413
414 $ hg manifest --debug | grep delta
414 $ hg manifest --debug | grep delta
415 9600f98bb60ce732634d126aaa4ac1ec959c573e 644 delta
415 9600f98bb60ce732634d126aaa4ac1ec959c573e 644 delta
416
416
417 $ hg import -d "1000000 0" -m delta - <<'EOF'
417 $ hg import -d "1000000 0" -m delta - <<'EOF'
418 > diff --git a/delta b/delta
418 > diff --git a/delta b/delta
419 > index 8c9b7831b231c2600843e303e66b521353a200b3..0021dd95bc0dba53c39ce81377126d43731d68df 100644
419 > index 8c9b7831b231c2600843e303e66b521353a200b3..0021dd95bc0dba53c39ce81377126d43731d68df 100644
420 > GIT binary patch
420 > GIT binary patch
421 > delta 49
421 > delta 49
422 > zcmZ1~yHs|=21Z8J$r~9bFdA-lVv=EEw4WT$qRf2QSa5SIOAHI6(&k4T8H|kLo4vWB
422 > zcmZ1~yHs|=21Z8J$r~9bFdA-lVv=EEw4WT$qRf2QSa5SIOAHI6(&k4T8H|kLo4vWB
423 > FSO9ZT4bA`n
423 > FSO9ZT4bA`n
424 >
424 >
425 > delta 49
425 > delta 49
426 > zcmV-10M7rV9i<(xumJ(}ld%Di0Xefm0vrMXpOaq%BLm9I%d>?9Tm%6Vv*HM70RcC&
426 > zcmV-10M7rV9i<(xumJ(}ld%Di0Xefm0vrMXpOaq%BLm9I%d>?9Tm%6Vv*HM70RcC&
427 > HOA1;9yU-AD
427 > HOA1;9yU-AD
428 >
428 >
429 > EOF
429 > EOF
430 applying patch from stdin
430 applying patch from stdin
431
431
432 $ hg manifest --debug | grep delta
432 $ hg manifest --debug | grep delta
433 56094bbea136dcf8dbd4088f6af469bde1a98b75 644 delta
433 56094bbea136dcf8dbd4088f6af469bde1a98b75 644 delta
434
434
435 Filenames with spaces:
435 Filenames with spaces:
436
436
437 $ sed 's,EOL$,,g' <<EOF | hg import -d "1000000 0" -m spaces -
437 $ sed 's,EOL$,,g' <<EOF | hg import -d "1000000 0" -m spaces -
438 > diff --git a/foo bar b/foo bar
438 > diff --git a/foo bar b/foo bar
439 > new file mode 100644
439 > new file mode 100644
440 > index 0000000..257cc56
440 > index 0000000..257cc56
441 > --- /dev/null
441 > --- /dev/null
442 > +++ b/foo bar EOL
442 > +++ b/foo bar EOL
443 > @@ -0,0 +1 @@
443 > @@ -0,0 +1 @@
444 > +foo
444 > +foo
445 > EOF
445 > EOF
446 applying patch from stdin
446 applying patch from stdin
447
447
448 $ hg tip -q
448 $ hg tip -q
449 14:4b79479c9a6d
449 14:4b79479c9a6d
450
450
451 $ cat "foo bar"
451 $ cat "foo bar"
452 foo
452 foo
453
453
454 Copy then modify the original file:
454 Copy then modify the original file:
455
455
456 $ hg import -d "1000000 0" -m copy-mod-orig - <<EOF
456 $ hg import -d "1000000 0" -m copy-mod-orig - <<EOF
457 > diff --git a/foo2 b/foo2
457 > diff --git a/foo2 b/foo2
458 > index 257cc56..fe08ec6 100644
458 > index 257cc56..fe08ec6 100644
459 > --- a/foo2
459 > --- a/foo2
460 > +++ b/foo2
460 > +++ b/foo2
461 > @@ -1 +1,2 @@
461 > @@ -1 +1,2 @@
462 > foo
462 > foo
463 > +new line
463 > +new line
464 > diff --git a/foo2 b/foo3
464 > diff --git a/foo2 b/foo3
465 > similarity index 100%
465 > similarity index 100%
466 > copy from foo2
466 > copy from foo2
467 > copy to foo3
467 > copy to foo3
468 > EOF
468 > EOF
469 applying patch from stdin
469 applying patch from stdin
470
470
471 $ hg tip -q
471 $ hg tip -q
472 15:9cbe44af4ae9
472 15:9cbe44af4ae9
473
473
474 $ cat foo3
474 $ cat foo3
475 foo
475 foo
476
476
477 Move text file and patch as binary
477 Move text file and patch as binary
478
478
479 $ echo a > text2
479 $ echo a > text2
480 $ hg ci -Am0
480 $ hg ci -Am0
481 adding text2
481 adding text2
482 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
482 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
483 > diff --git a/text2 b/binary2
483 > diff --git a/text2 b/binary2
484 > rename from text2
484 > rename from text2
485 > rename to binary2
485 > rename to binary2
486 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
486 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
487 > GIT binary patch
487 > GIT binary patch
488 > literal 5
488 > literal 5
489 > Mc$`b*O5$Pw00T?_*Z=?k
489 > Mc$`b*O5$Pw00T?_*Z=?k
490 >
490 >
491 > EOF
491 > EOF
492 applying patch from stdin
492 applying patch from stdin
493
493
494 $ cat binary2
494 $ cat binary2
495 a
495 a
496 b
496 b
497 \x00 (no-eol) (esc)
497 \x00 (no-eol) (esc)
498
498
499 $ hg st --copies --change .
499 $ hg st --copies --change .
500 A binary2
500 A binary2
501 text2
501 text2
502 R text2
502 R text2
503
503
504 Invalid base85 content
504 Invalid base85 content
505
505
506 $ hg rollback
506 $ hg rollback
507 repository tip rolled back to revision 16 (undo import)
507 repository tip rolled back to revision 16 (undo import)
508 working directory now based on revision 16
508 working directory now based on revision 16
509 $ hg revert -aq
509 $ hg revert -aq
510 $ hg import -d "1000000 0" -m invalid-binary - <<"EOF"
510 $ hg import -d "1000000 0" -m invalid-binary - <<"EOF"
511 > diff --git a/text2 b/binary2
511 > diff --git a/text2 b/binary2
512 > rename from text2
512 > rename from text2
513 > rename to binary2
513 > rename to binary2
514 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
514 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
515 > GIT binary patch
515 > GIT binary patch
516 > literal 5
516 > literal 5
517 > Mc$`b*O.$Pw00T?_*Z=?k
517 > Mc$`b*O.$Pw00T?_*Z=?k
518 >
518 >
519 > EOF
519 > EOF
520 applying patch from stdin
520 applying patch from stdin
521 abort: could not decode "binary2" binary patch: bad base85 character at position 6
521 abort: could not decode "binary2" binary patch: bad base85 character at position 6
522 [255]
522 [255]
523
523
524 $ hg revert -aq
524 $ hg revert -aq
525 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
525 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
526 > diff --git a/text2 b/binary2
526 > diff --git a/text2 b/binary2
527 > rename from text2
527 > rename from text2
528 > rename to binary2
528 > rename to binary2
529 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
529 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
530 > GIT binary patch
530 > GIT binary patch
531 > literal 6
531 > literal 6
532 > Mc$`b*O5$Pw00T?_*Z=?k
532 > Mc$`b*O5$Pw00T?_*Z=?k
533 >
533 >
534 > EOF
534 > EOF
535 applying patch from stdin
535 applying patch from stdin
536 abort: "binary2" length is 5 bytes, should be 6
536 abort: "binary2" length is 5 bytes, should be 6
537 [255]
537 [255]
538
538
539 $ hg revert -aq
539 $ hg revert -aq
540 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
540 $ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
541 > diff --git a/text2 b/binary2
541 > diff --git a/text2 b/binary2
542 > rename from text2
542 > rename from text2
543 > rename to binary2
543 > rename to binary2
544 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
544 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
545 > GIT binary patch
545 > GIT binary patch
546 > Mc$`b*O5$Pw00T?_*Z=?k
546 > Mc$`b*O5$Pw00T?_*Z=?k
547 >
547 >
548 > EOF
548 > EOF
549 applying patch from stdin
549 applying patch from stdin
550 abort: could not extract "binary2" binary data
550 abort: could not extract "binary2" binary data
551 [255]
551 [255]
552
552
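
For reviewers unfamiliar with the format these three failures exercise: a "GIT binary patch" hunk announces the uncompressed size ("literal N" or "delta N") and then carries zlib-compressed data encoded in git's base85, where the first character of each data line says how many decoded bytes that line contributes ('A'-'Z' for 1-26, 'a'-'z' for 27-52). Below is a rough sketch of decoding the single data line used in the tests above, mirroring what patch.py's binhunk handling does (it assumes mercurial's base85 helper; the expected bytes are inferred from the passing "literal 5" case, not verified here):

    # Decode one git-binary-patch data line (sketch only; error handling and
    # multi-line payloads are omitted).
    import zlib
    from mercurial import base85

    line = 'Mc$`b*O5$Pw00T?_*Z=?k'
    first = line[0]
    nbytes = (ord(first) - ord('A') + 1 if first.isupper()
              else ord(first) - ord('a') + 27)
    chunk = base85.b85decode(line[1:])[:nbytes]
    print(repr(zlib.decompress(chunk)))  # expected: 'a\nb\n\x00' (5 bytes, per "literal 5")
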
553 Simulate a copy/paste turning LF into CRLF (issue2870)
553 Simulate a copy/paste turning LF into CRLF (issue2870)
554
554
555 $ hg revert -aq
555 $ hg revert -aq
556 $ cat > binary.diff <<"EOF"
556 $ cat > binary.diff <<"EOF"
557 > diff --git a/text2 b/binary2
557 > diff --git a/text2 b/binary2
558 > rename from text2
558 > rename from text2
559 > rename to binary2
559 > rename to binary2
560 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
560 > index 78981922613b2afb6025042ff6bd878ac1994e85..10efcb362e9f3b3420fcfbfc0e37f3dc16e29757
561 > GIT binary patch
561 > GIT binary patch
562 > literal 5
562 > literal 5
563 > Mc$`b*O5$Pw00T?_*Z=?k
563 > Mc$`b*O5$Pw00T?_*Z=?k
564 >
564 >
565 > EOF
565 > EOF
566 >>> fp = file('binary.diff', 'rb')
566 >>> fp = file('binary.diff', 'rb')
567 >>> data = fp.read()
567 >>> data = fp.read()
568 >>> fp.close()
568 >>> fp.close()
569 >>> file('binary.diff', 'wb').write(data.replace('\n', '\r\n'))
569 >>> file('binary.diff', 'wb').write(data.replace('\n', '\r\n'))
570 $ rm binary2
570 $ rm binary2
571 $ hg import --no-commit binary.diff
571 $ hg import --no-commit binary.diff
572 applying binary.diff
572 applying binary.diff
573
573
574 $ cd ..
574 $ cd ..
575
575
576 Consecutive import with renames (issue2459)
576 Consecutive import with renames (issue2459)
577
577
578 $ hg init issue2459
578 $ hg init issue2459
579 $ cd issue2459
579 $ cd issue2459
580 $ hg import --no-commit --force - <<EOF
580 $ hg import --no-commit --force - <<EOF
581 > diff --git a/a b/a
581 > diff --git a/a b/a
582 > new file mode 100644
582 > new file mode 100644
583 > EOF
583 > EOF
584 applying patch from stdin
584 applying patch from stdin
585 $ hg import --no-commit --force - <<EOF
585 $ hg import --no-commit --force - <<EOF
586 > diff --git a/a b/b
586 > diff --git a/a b/b
587 > rename from a
587 > rename from a
588 > rename to b
588 > rename to b
589 > EOF
589 > EOF
590 applying patch from stdin
590 applying patch from stdin
591 a has not been committed yet, so no copy data will be stored for b.
591 a has not been committed yet, so no copy data will be stored for b.
592 $ hg debugstate
592 $ hg debugstate
593 a 0 -1 unset b
593 a 0 -1 unset b
594 $ hg ci -m done
594 $ hg ci -m done
595 $ cd ..
595 $ cd ..
596
596
597 Renames and strip
597 Renames and strip
598
598
599 $ hg init renameandstrip
599 $ hg init renameandstrip
600 $ cd renameandstrip
600 $ cd renameandstrip
601 $ echo a > a
601 $ echo a > a
602 $ hg ci -Am adda
602 $ hg ci -Am adda
603 adding a
603 adding a
604 $ hg import --no-commit -p2 - <<EOF
604 $ hg import --no-commit -p2 - <<EOF
605 > diff --git a/foo/a b/foo/b
605 > diff --git a/foo/a b/foo/b
606 > rename from foo/a
606 > rename from foo/a
607 > rename to foo/b
607 > rename to foo/b
608 > EOF
608 > EOF
609 applying patch from stdin
609 applying patch from stdin
610 $ hg st --copies
610 $ hg st --copies
611 A b
611 A b
612 a
612 a
613 R a
613 R a
614
614
615 Prefix with strip, renames, creates etc
615 Prefix with strip, renames, creates etc
616
616
617 $ hg revert -aC
617 $ hg revert -aC
618 undeleting a
618 undeleting a
619 forgetting b
619 forgetting b
620 $ rm b
620 $ rm b
621 $ mkdir -p dir/dir2
621 $ mkdir -p dir/dir2
622 $ echo b > dir/dir2/b
622 $ echo b > dir/dir2/b
623 $ echo c > dir/dir2/c
623 $ echo c > dir/dir2/c
624 $ echo d > dir/d
624 $ echo d > dir/d
625 $ hg ci -Am addbcd
625 $ hg ci -Am addbcd
626 adding dir/d
626 adding dir/d
627 adding dir/dir2/b
627 adding dir/dir2/b
628 adding dir/dir2/c
628 adding dir/dir2/c
629
629
630 prefix '.' is the same as no prefix
630 prefix '.' is the same as no prefix
631 $ hg import --no-commit --prefix . - <<EOF
631 $ hg import --no-commit --prefix . - <<EOF
632 > diff --git a/dir/a b/dir/a
632 > diff --git a/dir/a b/dir/a
633 > --- /dev/null
633 > --- /dev/null
634 > +++ b/dir/a
634 > +++ b/dir/a
635 > @@ -0,0 +1 @@
635 > @@ -0,0 +1 @@
636 > +aaaa
636 > +aaaa
637 > diff --git a/dir/d b/dir/d
637 > diff --git a/dir/d b/dir/d
638 > --- a/dir/d
638 > --- a/dir/d
639 > +++ b/dir/d
639 > +++ b/dir/d
640 > @@ -1,1 +1,2 @@
640 > @@ -1,1 +1,2 @@
641 > d
641 > d
642 > +dddd
642 > +dddd
643 > EOF
643 > EOF
644 applying patch from stdin
644 applying patch from stdin
645 $ cat dir/a
645 $ cat dir/a
646 aaaa
646 aaaa
647 $ cat dir/d
647 $ cat dir/d
648 d
648 d
649 dddd
649 dddd
650 $ hg revert -aC
650 $ hg revert -aC
651 forgetting dir/a (glob)
651 forgetting dir/a (glob)
652 reverting dir/d (glob)
652 reverting dir/d (glob)
653 $ rm dir/a
653 $ rm dir/a
654
654
655 prefix with default strip
655 prefix with default strip
656 $ hg import --no-commit --prefix dir/ - <<EOF
656 $ hg import --no-commit --prefix dir/ - <<EOF
657 > diff --git a/a b/a
657 > diff --git a/a b/a
658 > --- /dev/null
658 > --- /dev/null
659 > +++ b/a
659 > +++ b/a
660 > @@ -0,0 +1 @@
660 > @@ -0,0 +1 @@
661 > +aaa
661 > +aaa
662 > diff --git a/d b/d
662 > diff --git a/d b/d
663 > --- a/d
663 > --- a/d
664 > +++ b/d
664 > +++ b/d
665 > @@ -1,1 +1,2 @@
665 > @@ -1,1 +1,2 @@
666 > d
666 > d
667 > +dd
667 > +dd
668 > EOF
668 > EOF
669 applying patch from stdin
669 applying patch from stdin
670 $ cat dir/a
670 $ cat dir/a
671 aaa
671 aaa
672 $ cat dir/d
672 $ cat dir/d
673 d
673 d
674 dd
674 dd
675 $ hg revert -aC
675 $ hg revert -aC
676 forgetting dir/a (glob)
676 forgetting dir/a (glob)
677 reverting dir/d (glob)
677 reverting dir/d (glob)
678 $ rm dir/a
678 $ rm dir/a
679 (test that prefixes are relative to the cwd)
679 (test that prefixes are relative to the cwd)
680 $ mkdir tmpdir
680 $ mkdir tmpdir
681 $ cd tmpdir
681 $ cd tmpdir
682 $ hg import --no-commit -p2 --prefix ../dir/ - <<EOF
682 $ hg import --no-commit -p2 --prefix ../dir/ - <<EOF
683 > diff --git a/foo/a b/foo/a
683 > diff --git a/foo/a b/foo/a
684 > new file mode 100644
684 > new file mode 100644
685 > --- /dev/null
685 > --- /dev/null
686 > +++ b/foo/a
686 > +++ b/foo/a
687 > @@ -0,0 +1 @@
687 > @@ -0,0 +1 @@
688 > +a
688 > +a
689 > diff --git a/foo/dir2/b b/foo/dir2/b2
689 > diff --git a/foo/dir2/b b/foo/dir2/b2
690 > rename from foo/dir2/b
690 > rename from foo/dir2/b
691 > rename to foo/dir2/b2
691 > rename to foo/dir2/b2
692 > diff --git a/foo/dir2/c b/foo/dir2/c
692 > diff --git a/foo/dir2/c b/foo/dir2/c
693 > --- a/foo/dir2/c
693 > --- a/foo/dir2/c
694 > +++ b/foo/dir2/c
694 > +++ b/foo/dir2/c
695 > @@ -0,0 +1 @@
695 > @@ -0,0 +1 @@
696 > +cc
696 > +cc
697 > diff --git a/foo/d b/foo/d
697 > diff --git a/foo/d b/foo/d
698 > deleted file mode 100644
698 > deleted file mode 100644
699 > --- a/foo/d
699 > --- a/foo/d
700 > +++ /dev/null
700 > +++ /dev/null
701 > @@ -1,1 +0,0 @@
701 > @@ -1,1 +0,0 @@
702 > -d
702 > -d
703 > EOF
703 > EOF
704 applying patch from stdin
704 applying patch from stdin
705 $ hg st --copies
705 $ hg st --copies
706 M dir/dir2/c
706 M dir/dir2/c
707 A dir/a
707 A dir/a
708 A dir/dir2/b2
708 A dir/dir2/b2
709 dir/dir2/b
709 dir/dir2/b
710 R dir/d
710 R dir/d
711 R dir/dir2/b
711 R dir/dir2/b
712 $ cd ..
712 $ cd ..
713
713
714 Renames, similarity and git diff
714 Renames, similarity and git diff
715
715
716 $ hg revert -aC
716 $ hg revert -aC
717 forgetting dir/a (glob)
717 forgetting dir/a (glob)
718 undeleting dir/d (glob)
718 undeleting dir/d (glob)
719 undeleting dir/dir2/b (glob)
719 undeleting dir/dir2/b (glob)
720 forgetting dir/dir2/b2 (glob)
720 forgetting dir/dir2/b2 (glob)
721 reverting dir/dir2/c (glob)
721 reverting dir/dir2/c (glob)
722 $ rm dir/a dir/dir2/b2
722 $ rm dir/a dir/dir2/b2
723 $ hg import --similarity 90 --no-commit - <<EOF
723 $ hg import --similarity 90 --no-commit - <<EOF
724 > diff --git a/a b/b
724 > diff --git a/a b/b
725 > rename from a
725 > rename from a
726 > rename to b
726 > rename to b
727 > EOF
727 > EOF
728 applying patch from stdin
728 applying patch from stdin
729 $ hg st --copies
729 $ hg st --copies
730 A b
730 A b
731 a
731 a
732 R a
732 R a
733 $ cd ..
733 $ cd ..
734
734
735 Pure copy with existing destination
735 Pure copy with existing destination
736
736
737 $ hg init copytoexisting
737 $ hg init copytoexisting
738 $ cd copytoexisting
738 $ cd copytoexisting
739 $ echo a > a
739 $ echo a > a
740 $ echo b > b
740 $ echo b > b
741 $ hg ci -Am add
741 $ hg ci -Am add
742 adding a
742 adding a
743 adding b
743 adding b
744 $ hg import --no-commit - <<EOF
744 $ hg import --no-commit - <<EOF
745 > diff --git a/a b/b
745 > diff --git a/a b/b
746 > copy from a
746 > copy from a
747 > copy to b
747 > copy to b
748 > EOF
748 > EOF
749 applying patch from stdin
749 applying patch from stdin
750 abort: cannot create b: destination already exists
750 abort: cannot create b: destination already exists
751 [255]
751 [255]
752 $ cat b
752 $ cat b
753 b
753 b
754
754
755 Copy and changes with existing destination
755 Copy and changes with existing destination
756
756
757 $ hg import --no-commit - <<EOF
757 $ hg import --no-commit - <<EOF
758 > diff --git a/a b/b
758 > diff --git a/a b/b
759 > copy from a
759 > copy from a
760 > copy to b
760 > copy to b
761 > --- a/a
761 > --- a/a
762 > +++ b/b
762 > +++ b/b
763 > @@ -1,1 +1,2 @@
763 > @@ -1,1 +1,2 @@
764 > a
764 > a
765 > +b
765 > +b
766 > EOF
766 > EOF
767 applying patch from stdin
767 applying patch from stdin
768 cannot create b: destination already exists
768 cannot create b: destination already exists
769 1 out of 1 hunks FAILED -- saving rejects to file b.rej
769 1 out of 1 hunks FAILED -- saving rejects to file b.rej
770 abort: patch failed to apply
770 abort: patch failed to apply
771 [255]
771 [255]
772 $ cat b
772 $ cat b
773 b
773 b
774
774
775 #if symlink
775 #if symlink
776
776
777 $ ln -s b linkb
777 $ ln -s b linkb
778 $ hg add linkb
778 $ hg add linkb
779 $ hg ci -m addlinkb
779 $ hg ci -m addlinkb
780 $ hg import --no-commit - <<EOF
780 $ hg import --no-commit - <<EOF
781 > diff --git a/linkb b/linkb
781 > diff --git a/linkb b/linkb
782 > deleted file mode 120000
782 > deleted file mode 120000
783 > --- a/linkb
783 > --- a/linkb
784 > +++ /dev/null
784 > +++ /dev/null
785 > @@ -1,1 +0,0 @@
785 > @@ -1,1 +0,0 @@
786 > -badhunk
786 > -badhunk
787 > \ No newline at end of file
787 > \ No newline at end of file
788 > EOF
788 > EOF
789 applying patch from stdin
789 applying patch from stdin
790 patching file linkb
790 patching file linkb
791 Hunk #1 FAILED at 0
791 Hunk #1 FAILED at 0
792 1 out of 1 hunks FAILED -- saving rejects to file linkb.rej
792 1 out of 1 hunks FAILED -- saving rejects to file linkb.rej
793 abort: patch failed to apply
793 abort: patch failed to apply
794 [255]
794 [255]
795 $ hg st
795 $ hg st
796 ? b.rej
796 ? b.rej
797 ? linkb.rej
797 ? linkb.rej
798
798
799 #endif
799 #endif
800
800
801 Test corner case involving copies and multiple hunks (issue3384)
801 Test corner case involving copies and multiple hunks (issue3384)
802
802
803 $ hg revert -qa
803 $ hg revert -qa
804 $ hg import --no-commit - <<EOF
804 $ hg import --no-commit - <<EOF
805 > diff --git a/a b/c
805 > diff --git a/a b/c
806 > copy from a
806 > copy from a
807 > copy to c
807 > copy to c
808 > --- a/a
808 > --- a/a
809 > +++ b/c
809 > +++ b/c
810 > @@ -1,1 +1,2 @@
810 > @@ -1,1 +1,2 @@
811 > a
811 > a
812 > +a
812 > +a
813 > @@ -2,1 +2,2 @@
813 > @@ -2,1 +2,2 @@
814 > a
814 > a
815 > +a
815 > +a
816 > diff --git a/a b/a
816 > diff --git a/a b/a
817 > --- a/a
817 > --- a/a
818 > +++ b/a
818 > +++ b/a
819 > @@ -1,1 +1,2 @@
819 > @@ -1,1 +1,2 @@
820 > a
820 > a
821 > +b
821 > +b
822 > EOF
822 > EOF
823 applying patch from stdin
823 applying patch from stdin
824
824
825 Test email metadata
826
827 $ hg revert -qa
828 $ hg --encoding utf-8 import - <<EOF
829 > From: =?UTF-8?q?Rapha=C3=ABl=20Hertzog?= <hertzog@debian.org>
830 > Subject: [PATCH] =?UTF-8?q?=C5=A7=E2=82=AC=C3=9F=E1=B9=AA?=
831 >
832 > diff --git a/a b/a
833 > --- a/a
834 > +++ b/a
835 > @@ -1,1 +1,2 @@
836 > a
837 > +a
838 > EOF
839 applying patch from stdin
840 $ hg --encoding utf-8 log -r .
841 changeset: 2:* (glob)
842 tag: tip
843 user: Rapha\xc3\xabl Hertzog <hertzog@debian.org> (esc)
844 date: * (glob)
845 summary: \xc5\xa7\xe2\x82\xac\xc3\x9f\xe1\xb9\xaa (esc)
846
847
825 $ cd ..
848 $ cd ..
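
The email-metadata test added above relies on RFC 2047 decoding of the From and Subject headers. As a rough illustration of what that decoding produces, here is a stdlib-only sketch (this is not the code path hg itself uses):

    # Decode the RFC 2047 encoded From header used in the test above.
    from email.header import decode_header

    raw = '=?UTF-8?q?Rapha=C3=ABl=20Hertzog?= <hertzog@debian.org>'
    pieces = []
    for part, charset in decode_header(raw):
        if isinstance(part, bytes):
            part = part.decode(charset or 'ascii')
        pieces.append(part.strip())
    print(' '.join(pieces))  # Raphaël Hertzog <hertzog@debian.org>
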